1//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the XCoreTargetLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "XCoreISelLowering.h"
14#include "XCore.h"
16#include "XCoreSubtarget.h"
17#include "XCoreTargetMachine.h"
26#include "llvm/IR/CallingConv.h"
27#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalAlias.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/IntrinsicsXCore.h"
34#include "llvm/Support/Debug.h"
38#include <algorithm>
39
40using namespace llvm;
41
42#define DEBUG_TYPE "xcore-lower"
43
44const char *XCoreTargetLowering::
45getTargetNodeName(unsigned Opcode) const
46{
47 switch ((XCoreISD::NodeType)Opcode)
48 {
49 case XCoreISD::FIRST_NUMBER : break;
50 case XCoreISD::BL : return "XCoreISD::BL";
51 case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
52 case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
53 case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
54 case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
55 case XCoreISD::STWSP : return "XCoreISD::STWSP";
56 case XCoreISD::RETSP : return "XCoreISD::RETSP";
57 case XCoreISD::LADD : return "XCoreISD::LADD";
58 case XCoreISD::LSUB : return "XCoreISD::LSUB";
59 case XCoreISD::LMUL : return "XCoreISD::LMUL";
60 case XCoreISD::MACCU : return "XCoreISD::MACCU";
61 case XCoreISD::MACCS : return "XCoreISD::MACCS";
62 case XCoreISD::CRC8 : return "XCoreISD::CRC8";
63 case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
64 case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
65 case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
66 case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
67 }
68 return nullptr;
69}
70
71XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
72 const XCoreSubtarget &Subtarget)
73 : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
74
75 // Set up the register classes.
76 addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
77
78 // Compute derived properties from the register classes
79 computeRegisterProperties(Subtarget.getRegisterInfo());
80
81 setStackPointerRegisterToSaveRestore(XCore::SP);
82
83 setSchedulingPreference(Sched::Source);
84
85 // Use i32 for setcc operations results (slt, sgt, ...).
86 setBooleanContents(ZeroOrOneBooleanContent);
87 setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
88
89 // XCore does not have the NodeTypes below.
90 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
91 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
92
93 // 64bit
94 setOperationAction(ISD::ADD, MVT::i64, Custom);
95 setOperationAction(ISD::SUB, MVT::i64, Custom);
96 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
97 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
98 setOperationAction(ISD::MULHS, MVT::i32, Expand);
99 setOperationAction(ISD::MULHU, MVT::i32, Expand);
100 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
101 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
102 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
103
104 // Bit Manipulation
105 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
106 setOperationAction(ISD::ROTL, MVT::i32, Expand);
107 setOperationAction(ISD::ROTR, MVT::i32, Expand);
108 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
109
110 setOperationAction(ISD::TRAP, MVT::Other, Legal);
111
112 // Jump tables.
114
117
118 // Conversion of i64 -> double produces constantpool nodes
120
121 // Loads
122 for (MVT VT : MVT::integer_valuetypes()) {
123 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
124 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
125 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
126
127 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
128 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
129 }
130
131 // Custom expand misaligned loads / stores.
132 setOperationAction(ISD::LOAD, MVT::i32, Custom);
133 setOperationAction(ISD::STORE, MVT::i32, Custom);
134
135 // Varargs
136 setOperationAction(ISD::VAEND, MVT::Other, Expand);
137 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
138 setOperationAction(ISD::VAARG, MVT::Other, Custom);
139 setOperationAction(ISD::VASTART, MVT::Other, Custom);
140
141 // Dynamic stack
142 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
143 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
144 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
145
146 // Exception handling
147 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
148 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
149
150 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
151
152 // TRAMPOLINE is custom lowered.
153 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
154 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
155
156 // We want to custom lower some of our intrinsics.
157 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
158
162
163 // We have target-specific dag combine patterns for the following nodes:
166
169
170 // This target doesn't implement native atomics.
171 setMaxAtomicSizeInBitsSupported(0);
172}
173
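// XCore's byte load (LD8U) already zero-extends the loaded value into a full
// 32-bit register, so zero-extending the result of an i8 load costs nothing;
// that is why i8 loads are reported as free below.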
174bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
175 if (Val.getOpcode() != ISD::LOAD)
176 return false;
177
178 EVT VT1 = Val.getValueType();
179 if (!VT1.isSimple() || !VT1.isInteger() ||
180 !VT2.isSimple() || !VT2.isInteger())
181 return false;
182
183 switch (VT1.getSimpleVT().SimpleTy) {
184 default: break;
185 case MVT::i8:
186 return true;
187 }
188
189 return false;
190}
191
192SDValue XCoreTargetLowering::
193LowerOperation(SDValue Op, SelectionDAG &DAG) const {
194 switch (Op.getOpcode())
195 {
196 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
197 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
198 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
199 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
200 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
201 case ISD::LOAD: return LowerLOAD(Op, DAG);
202 case ISD::STORE: return LowerSTORE(Op, DAG);
203 case ISD::VAARG: return LowerVAARG(Op, DAG);
204 case ISD::VASTART: return LowerVASTART(Op, DAG);
205 case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
206 case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
207 // FIXME: Remove these when LegalizeDAGTypes lands.
208 case ISD::ADD:
209 case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
210 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
211 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
212 case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
213 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
214 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
215 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
216 case ISD::ATOMIC_FENCE:
217 return LowerATOMIC_FENCE(Op, DAG);
218 default:
219 llvm_unreachable("unimplemented operand");
220 }
221}
222
223/// ReplaceNodeResults - Replace the results of node with an illegal result
224/// type with new values built out of custom code.
225void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
226 SmallVectorImpl<SDValue> &Results,
227 SelectionDAG &DAG) const {
228 switch (N->getOpcode()) {
229 default:
230 llvm_unreachable("Don't know how to custom expand this!");
231 case ISD::ADD:
232 case ISD::SUB:
233 Results.push_back(ExpandADDSUB(N, DAG));
234 return;
235 }
236}
237
238//===----------------------------------------------------------------------===//
239// Misc Lower Operation implementation
240//===----------------------------------------------------------------------===//
241
242SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
243 const GlobalValue *GV,
244 SelectionDAG &DAG) const {
245 // FIXME there is no actual debug info here
246 SDLoc dl(GA);
247
248 if (GV->getValueType()->isFunctionTy())
249 return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
250
251 const auto *GVar = dyn_cast<GlobalVariable>(GV);
252 if ((GV->hasSection() && GV->getSection().starts_with(".cp.")) ||
253 (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
254 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
255
256 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
257}
258
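// Small objects can be addressed directly through the DP/CP relative
// wrappers; larger (or unsized) objects fall back to the constant-pool
// indirection in LowerGlobalAddress below.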
259static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
260 if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
261 return true;
262
263 Type *ObjType = GV->getValueType();
264 if (!ObjType->isSized())
265 return false;
266
267 auto &DL = GV->getParent()->getDataLayout();
268 unsigned ObjSize = DL.getTypeAllocSize(ObjType);
269 return ObjSize < CodeModelLargeSize && ObjSize != 0;
270}
271
272SDValue XCoreTargetLowering::
273LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
274{
275 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
276 const GlobalValue *GV = GN->getGlobal();
277 SDLoc DL(GN);
278 int64_t Offset = GN->getOffset();
279 if (IsSmallObject(GV, *this)) {
280 // We can only fold positive offsets that are a multiple of the word size.
281 int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
282 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
283 GA = getGlobalAddressWrapper(GA, GV, DAG);
284 // Handle the rest of the offset.
285 if (Offset != FoldedOffset) {
286 SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
287 GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
288 }
289 return GA;
290 } else {
291 // Ideally we would not fold in offset with an index <= 11.
292 Type *Ty = Type::getInt32Ty(*DAG.getContext());
293 Constant *Idx = ConstantInt::get(Ty, Offset);
294 Constant *GAI = ConstantExpr::getGetElementPtr(
295 Type::getInt8Ty(*DAG.getContext()), const_cast<GlobalValue *>(GV), Idx);
296 SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
297 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
298 DAG.getEntryNode(), CP, MachinePointerInfo());
299 }
300}
301
302SDValue XCoreTargetLowering::
303LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
304{
305 SDLoc DL(Op);
306 auto PtrVT = getPointerTy(DAG.getDataLayout());
307 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
308 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
309
310 return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
311}
312
313SDValue XCoreTargetLowering::
314LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
315{
316 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
317 // FIXME there isn't really debug info here
318 SDLoc dl(CP);
319 EVT PtrVT = Op.getValueType();
320 SDValue Res;
321 if (CP->isMachineConstantPoolEntry()) {
322 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
323 CP->getAlign(), CP->getOffset());
324 } else {
325 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
326 CP->getOffset());
327 }
328 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
329}
330
331unsigned XCoreTargetLowering::getJumpTableEncoding() const {
332 return MachineJumpTableInfo::EK_Inline;
333}
334
335SDValue XCoreTargetLowering::
336LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
337{
338 SDValue Chain = Op.getOperand(0);
339 SDValue Table = Op.getOperand(1);
340 SDValue Index = Op.getOperand(2);
341 SDLoc dl(Op);
342 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
343 unsigned JTI = JT->getIndex();
344 MachineFunction &MF = DAG.getMachineFunction();
345 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
346 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
347
348 unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
349 if (NumEntries <= 32) {
350 return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
351 }
352 assert((NumEntries >> 31) == 0);
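 // Larger tables use the 32-bit jump-table form, whose entries are twice as
 // wide, so the index is doubled before branching.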
353 SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
354 DAG.getConstant(1, dl, MVT::i32));
355 return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
356 ScaledIndex);
357}
358
359SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
360 const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
361 SelectionDAG &DAG) const {
362 auto PtrVT = getPointerTy(DAG.getDataLayout());
363 if ((Offset & 0x3) == 0) {
364 return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
365 }
366 // Lower to pair of consecutive word aligned loads plus some bit shifting.
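 // For example, a word load at Base+2 becomes word loads at Base and Base+4,
 // combined as (Low >> 16) | (High << 16) on this little-endian target.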
367 int32_t HighOffset = alignTo(Offset, 4);
368 int32_t LowOffset = HighOffset - 4;
369 SDValue LowAddr, HighAddr;
370 if (GlobalAddressSDNode *GASD =
371 dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
372 LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
373 LowOffset);
374 HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
375 HighOffset);
376 } else {
377 LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
378 DAG.getConstant(LowOffset, DL, MVT::i32));
379 HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
380 DAG.getConstant(HighOffset, DL, MVT::i32));
381 }
382 SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
383 SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
384
385 SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
386 SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
387 SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
388 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
389 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
390 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
391 High.getValue(1));
392 SDValue Ops[] = { Result, Chain };
393 return DAG.getMergeValues(Ops, DL);
394}
395
396static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
397{
398 KnownBits Known = DAG.computeKnownBits(Value);
399 return Known.countMinTrailingZeros() >= 2;
400}
401
402SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
403 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
404 LLVMContext &Context = *DAG.getContext();
405 LoadSDNode *LD = cast<LoadSDNode>(Op);
406 assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
407 "Unexpected extension type");
408 assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
409
410 if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
411 LD->getMemoryVT(), *LD->getMemOperand()))
412 return SDValue();
413
414 SDValue Chain = LD->getChain();
415 SDValue BasePtr = LD->getBasePtr();
416 SDLoc DL(Op);
417
418 if (!LD->isVolatile()) {
419 const GlobalValue *GV;
420 int64_t Offset = 0;
421 if (DAG.isBaseWithConstantOffset(BasePtr) &&
422 isWordAligned(BasePtr->getOperand(0), DAG)) {
423 SDValue NewBasePtr = BasePtr->getOperand(0);
424 Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
425 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
426 Offset, DAG);
427 }
428 if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
429 GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
430 SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
431 BasePtr->getValueType(0));
432 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
433 Offset, DAG);
434 }
435 }
436
437 if (LD->getAlign() == Align(2)) {
438 SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
439 LD->getPointerInfo(), MVT::i16, Align(2),
440 LD->getMemOperand()->getFlags());
441 SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
442 DAG.getConstant(2, DL, MVT::i32));
443 SDValue High =
444 DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
445 LD->getPointerInfo().getWithOffset(2), MVT::i16,
446 Align(2), LD->getMemOperand()->getFlags());
447 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
448 DAG.getConstant(16, DL, MVT::i32));
449 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
450 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
451 High.getValue(1));
452 SDValue Ops[] = { Result, Chain };
453 return DAG.getMergeValues(Ops, DL);
454 }
455
456 // Lower to a call to __misaligned_load(BasePtr).
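 // The external __misaligned_load helper takes the address as its only
 // argument and returns the loaded word.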
457 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
458 TargetLowering::ArgListTy Args;
459 TargetLowering::ArgListEntry Entry;
460
461 Entry.Ty = IntPtrTy;
462 Entry.Node = BasePtr;
463 Args.push_back(Entry);
464
465 TargetLowering::CallLoweringInfo CLI(DAG);
466 CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
467 CallingConv::C, IntPtrTy,
468 DAG.getExternalSymbol("__misaligned_load",
469 getPointerTy(DAG.getDataLayout())),
470 std::move(Args));
471
472 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
473 SDValue Ops[] = { CallResult.first, CallResult.second };
474 return DAG.getMergeValues(Ops, DL);
475}
476
477SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
478 LLVMContext &Context = *DAG.getContext();
479 StoreSDNode *ST = cast<StoreSDNode>(Op);
480 assert(!ST->isTruncatingStore() && "Unexpected store type");
481 assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
482
483 if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
484 ST->getMemoryVT(), *ST->getMemOperand()))
485 return SDValue();
486
487 SDValue Chain = ST->getChain();
488 SDValue BasePtr = ST->getBasePtr();
489 SDValue Value = ST->getValue();
490 SDLoc dl(Op);
491
492 if (ST->getAlign() == Align(2)) {
493 SDValue Low = Value;
494 SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
495 DAG.getConstant(16, dl, MVT::i32));
496 SDValue StoreLow =
497 DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
498 MVT::i16, Align(2), ST->getMemOperand()->getFlags());
499 SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
500 DAG.getConstant(2, dl, MVT::i32));
501 SDValue StoreHigh = DAG.getTruncStore(
502 Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
503 MVT::i16, Align(2), ST->getMemOperand()->getFlags());
504 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
505 }
506
507 // Lower to a call to __misaligned_store(BasePtr, Value).
508 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
509 TargetLowering::ArgListTy Args;
510 TargetLowering::ArgListEntry Entry;
511
512 Entry.Ty = IntPtrTy;
513 Entry.Node = BasePtr;
514 Args.push_back(Entry);
515
516 Entry.Node = Value;
517 Args.push_back(Entry);
518
519 TargetLowering::CallLoweringInfo CLI(DAG);
520 CLI.setDebugLoc(dl).setChain(Chain).setCallee(
521 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
522 DAG.getExternalSymbol("__misaligned_store",
523 getPointerTy(DAG.getDataLayout())),
524 std::move(Args));
525
526 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
527 return CallResult.second;
528}
529
530SDValue XCoreTargetLowering::
531LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
532{
533 assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
534 "Unexpected operand to lower!");
535 SDLoc dl(Op);
536 SDValue LHS = Op.getOperand(0);
537 SDValue RHS = Op.getOperand(1);
538 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
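 // MACCS computes AddendHi:AddendLo + LHS * RHS as a signed 64-bit value;
 // with zero addends its results are the two halves of the product
 // (result 0 is the high word, result 1 the low word).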
539 SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
540 DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
541 LHS, RHS);
542 SDValue Lo(Hi.getNode(), 1);
543 SDValue Ops[] = { Lo, Hi };
544 return DAG.getMergeValues(Ops, dl);
545}
546
547SDValue XCoreTargetLowering::
548LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
549{
550 assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
551 "Unexpected operand to lower!");
552 SDLoc dl(Op);
553 SDValue LHS = Op.getOperand(0);
554 SDValue RHS = Op.getOperand(1);
555 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
556 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
557 DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
558 Zero, Zero);
559 SDValue Lo(Hi.getNode(), 1);
560 SDValue Ops[] = { Lo, Hi };
561 return DAG.getMergeValues(Ops, dl);
562}
563
564/// isADDADDMUL - Return whether Op is in a form that is equivalent to
565/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
566/// each intermediate result in the calculation must also have a single use.
567/// If the Op is in the correct form the constituent parts are written to Mul0,
568/// Mul1, Addend0 and Addend1.
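/// For example, add(add(mul(x,y),a),b) gives Mul0 = x, Mul1 = y,
/// Addend0 = a and Addend1 = b.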
569static bool
570isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
571 SDValue &Addend1, bool requireIntermediatesHaveOneUse)
572{
573 if (Op.getOpcode() != ISD::ADD)
574 return false;
575 SDValue N0 = Op.getOperand(0);
576 SDValue N1 = Op.getOperand(1);
577 SDValue AddOp;
578 SDValue OtherOp;
579 if (N0.getOpcode() == ISD::ADD) {
580 AddOp = N0;
581 OtherOp = N1;
582 } else if (N1.getOpcode() == ISD::ADD) {
583 AddOp = N1;
584 OtherOp = N0;
585 } else {
586 return false;
587 }
588 if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
589 return false;
590 if (OtherOp.getOpcode() == ISD::MUL) {
591 // add(add(a,b),mul(x,y))
592 if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
593 return false;
594 Mul0 = OtherOp.getOperand(0);
595 Mul1 = OtherOp.getOperand(1);
596 Addend0 = AddOp.getOperand(0);
597 Addend1 = AddOp.getOperand(1);
598 return true;
599 }
600 if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
601 // add(add(mul(x,y),a),b)
602 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
603 return false;
604 Mul0 = AddOp.getOperand(0).getOperand(0);
605 Mul1 = AddOp.getOperand(0).getOperand(1);
606 Addend0 = AddOp.getOperand(1);
607 Addend1 = OtherOp;
608 return true;
609 }
610 if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
611 // add(add(a,mul(x,y)),b)
612 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
613 return false;
614 Mul0 = AddOp.getOperand(1).getOperand(0);
615 Mul1 = AddOp.getOperand(1).getOperand(1);
616 Addend0 = AddOp.getOperand(0);
617 Addend1 = OtherOp;
618 return true;
619 }
620 return false;
621}
622
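// TryExpandADDWithMul - when a 64-bit add has a multiply operand, try to
// lower the combined expression to XCore's long multiply-accumulate nodes
// (MACCU / MACCS) rather than expanding the add and the multiply separately.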
623SDValue XCoreTargetLowering::
624TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
625{
626 SDValue Mul;
627 SDValue Other;
628 if (N->getOperand(0).getOpcode() == ISD::MUL) {
629 Mul = N->getOperand(0);
630 Other = N->getOperand(1);
631 } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
632 Mul = N->getOperand(1);
633 Other = N->getOperand(0);
634 } else {
635 return SDValue();
636 }
637 SDLoc dl(N);
638 SDValue LL, RL, AddendL, AddendH;
639 LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
640 Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
641 RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
642 Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
643 AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
644 Other, DAG.getConstant(0, dl, MVT::i32));
645 AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
646 Other, DAG.getConstant(1, dl, MVT::i32));
647 APInt HighMask = APInt::getHighBitsSet(64, 32);
648 unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
649 unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
650 if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
651 DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
652 // The inputs are both zero-extended.
653 SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
654 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
655 AddendL, LL, RL);
656 SDValue Lo(Hi.getNode(), 1);
657 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
658 }
659 if (LHSSB > 32 && RHSSB > 32) {
660 // The inputs are both sign-extended.
661 SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
662 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
663 AddendL, LL, RL);
664 SDValue Lo(Hi.getNode(), 1);
665 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
666 }
667 SDValue LH, RH;
668 LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
669 Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
670 RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
671 Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
672 SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
673 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
674 AddendL, LL, RL);
675 SDValue Lo(Hi.getNode(), 1);
676 RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
677 LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
678 Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
679 Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
680 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
681}
682
683SDValue XCoreTargetLowering::
684ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
685{
686 assert(N->getValueType(0) == MVT::i64 &&
687 (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
688 "Unknown operand to lower!");
689
690 if (N->getOpcode() == ISD::ADD)
691 if (SDValue Result = TryExpandADDWithMul(N, DAG))
692 return Result;
693
694 SDLoc dl(N);
695
696 // Extract components
697 SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
698 N->getOperand(0),
699 DAG.getConstant(0, dl, MVT::i32));
700 SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
701 N->getOperand(0),
702 DAG.getConstant(1, dl, MVT::i32));
703 SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
704 N->getOperand(1),
705 DAG.getConstant(0, dl, MVT::i32));
706 SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
707 N->getOperand(1),
708 DAG.getConstant(1, dl, MVT::i32));
709
710 // Expand
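 // Split the i64 operation into two i32 LADD/LSUB nodes; the second result
 // of the low half is the carry (or borrow) that feeds the high half.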
711 unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
712 XCoreISD::LSUB;
713 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
714 SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
715 LHSL, RHSL, Zero);
716 SDValue Carry(Lo.getNode(), 1);
717
718 SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
719 LHSH, RHSH, Carry);
720 SDValue Ignored(Hi.getNode(), 1);
721 // Merge the pieces
722 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
723}
724
725SDValue XCoreTargetLowering::
726LowerVAARG(SDValue Op, SelectionDAG &DAG) const
727{
728 // Whilst llvm does not support aggregate varargs we can ignore
729 // the possibility of the ValueType being an implicit byVal vararg.
730 SDNode *Node = Op.getNode();
731 EVT VT = Node->getValueType(0); // not an aggregate
732 SDValue InChain = Node->getOperand(0);
733 SDValue VAListPtr = Node->getOperand(1);
734 EVT PtrVT = VAListPtr.getValueType();
735 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
736 SDLoc dl(Node);
737 SDValue VAList =
738 DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
739 // Increment the pointer, VAList, to the next vararg
740 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
741 DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
742 dl));
743 // Store the incremented VAList to the legalized pointer
744 InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
745 MachinePointerInfo(SV));
746 // Load the actual argument out of the pointer VAList
747 return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
748}
749
750SDValue XCoreTargetLowering::
751LowerVASTART(SDValue Op, SelectionDAG &DAG) const
752{
753 SDLoc dl(Op);
754 // vastart stores the address of the VarArgsFrameIndex slot into the
755 // memory location argument
756 MachineFunction &MF = DAG.getMachineFunction();
757 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
758 SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
759 return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
760 MachinePointerInfo());
761}
762
763SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
764 SelectionDAG &DAG) const {
765 // This node represents llvm.frameaddress on the DAG.
766 // It takes one operand, the index of the frame address to return.
767 // An index of zero corresponds to the current function's frame address.
768 // An index of one to the parent's frame address, and so on.
769 // Depths > 0 not supported yet!
770 if (Op.getConstantOperandVal(0) > 0)
771 return SDValue();
772
773 MachineFunction &MF = DAG.getMachineFunction();
774 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
775 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
776 RegInfo->getFrameRegister(MF), MVT::i32);
777}
778
779SDValue XCoreTargetLowering::
780LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
781 // This node represents llvm.returnaddress on the DAG.
782 // It takes one operand, the index of the return address to return.
783 // An index of zero corresponds to the current function's return address.
784 // An index of one to the parent's return address, and so on.
785 // Depths > 0 not supported yet!
786 if (Op.getConstantOperandVal(0) > 0)
787 return SDValue();
788
789 MachineFunction &MF = DAG.getMachineFunction();
790 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
791 int FI = XFI->createLRSpillSlot(MF);
792 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
793 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
794 DAG.getEntryNode(), FIN,
795 MachinePointerInfo::getFixedStack(MF, FI));
796}
797
798SDValue XCoreTargetLowering::
799LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
800 // This node represents offset from frame pointer to first on-stack argument.
801 // This is needed for correct stack adjustment during unwind.
802 // However, we don't know the offset until after the frame has been finalised.
803 // This is done during the XCoreFTAOElim pass.
804 return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
805}
806
807SDValue XCoreTargetLowering::
808LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
809 // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
810 // This node represents 'eh_return' gcc dwarf builtin, which is used to
811 // return from exception. The general meaning is: adjust stack by OFFSET and
812 // pass execution to HANDLER.
813 MachineFunction &MF = DAG.getMachineFunction();
814 SDValue Chain = Op.getOperand(0);
815 SDValue Offset = Op.getOperand(1);
816 SDValue Handler = Op.getOperand(2);
817 SDLoc dl(Op);
818
819 // Absolute SP = (FP + FrameToArgs) + Offset
820 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
821 SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
822 RegInfo->getFrameRegister(MF), MVT::i32);
823 SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
824 MVT::i32);
825 Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
826 Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
827
828 // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
829 // which leaves 2 caller saved registers, R2 & R3 for us to use.
830 unsigned StackReg = XCore::R2;
831 unsigned HandlerReg = XCore::R3;
832
833 SDValue OutChains[] = {
834 DAG.getCopyToReg(Chain, dl, StackReg, Stack),
835 DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
836 };
837
838 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
839
840 return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
841 DAG.getRegister(StackReg, MVT::i32),
842 DAG.getRegister(HandlerReg, MVT::i32));
843
844}
845
846SDValue XCoreTargetLowering::
847LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
848 return Op.getOperand(0);
849}
850
851SDValue XCoreTargetLowering::
852LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
853 SDValue Chain = Op.getOperand(0);
854 SDValue Trmp = Op.getOperand(1); // trampoline
855 SDValue FPtr = Op.getOperand(2); // nested function
856 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
857
858 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
859
860 // .align 4
861 // LDAPF_u10 r11, nest
862 // LDW_2rus r11, r11[0]
863 // STWSP_ru6 r11, sp[0]
864 // LDAPF_u10 r11, fptr
865 // LDW_2rus r11, r11[0]
866 // BAU_1r r11
867 // nest:
868 // .word nest
869 // fptr:
870 // .word fptr
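 // The first three word stores below write the encoded instruction sequence
 // shown above; the final two stores plant the nest value and the function
 // pointer that the trampoline code loads at run time.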
871 SDValue OutChains[5];
872
873 SDValue Addr = Trmp;
874
875 SDLoc dl(Op);
876 OutChains[0] =
877 DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
878 MachinePointerInfo(TrmpAddr));
879
880 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
881 DAG.getConstant(4, dl, MVT::i32));
882 OutChains[1] =
883 DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
884 MachinePointerInfo(TrmpAddr, 4));
885
886 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
887 DAG.getConstant(8, dl, MVT::i32));
888 OutChains[2] =
889 DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
890 MachinePointerInfo(TrmpAddr, 8));
891
892 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
893 DAG.getConstant(12, dl, MVT::i32));
894 OutChains[3] =
895 DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
896
897 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
898 DAG.getConstant(16, dl, MVT::i32));
899 OutChains[4] =
900 DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
901
902 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
903}
904
905SDValue XCoreTargetLowering::
906LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
907 SDLoc DL(Op);
908 unsigned IntNo = Op.getConstantOperandVal(0);
909 switch (IntNo) {
910 case Intrinsic::xcore_crc8:
911 EVT VT = Op.getValueType();
912 SDValue Data =
913 DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
914 Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
915 SDValue Crc(Data.getNode(), 1);
916 SDValue Results[] = { Crc, Data };
917 return DAG.getMergeValues(Results, DL);
918 }
919 return SDValue();
920}
921
922SDValue XCoreTargetLowering::
923LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
924 SDLoc DL(Op);
925 return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
926}
927
928//===----------------------------------------------------------------------===//
929// Calling Convention Implementation
930//===----------------------------------------------------------------------===//
931
932#include "XCoreGenCallingConv.inc"
933
934//===----------------------------------------------------------------------===//
935// Call Calling Convention Implementation
936//===----------------------------------------------------------------------===//
937
938/// XCore call implementation
939SDValue
940XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
941 SmallVectorImpl<SDValue> &InVals) const {
942 SelectionDAG &DAG = CLI.DAG;
943 SDLoc &dl = CLI.DL;
944 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
945 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
946 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
947 SDValue Chain = CLI.Chain;
948 SDValue Callee = CLI.Callee;
949 bool &isTailCall = CLI.IsTailCall;
950 CallingConv::ID CallConv = CLI.CallConv;
951 bool isVarArg = CLI.IsVarArg;
952
953 // XCore target does not yet support tail call optimization.
954 isTailCall = false;
955
956 // For now, only CallingConv::C implemented
957 switch (CallConv)
958 {
959 default:
960 report_fatal_error("Unsupported calling convention");
961 case CallingConv::Fast:
962 case CallingConv::C:
963 return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
964 Outs, OutVals, Ins, dl, DAG, InVals);
965 }
966}
967
968/// LowerCallResult - Lower the result values of a call into the
969/// appropriate copies out of appropriate physical registers / memory locations.
970static SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
971 const SmallVectorImpl<CCValAssign> &RVLocs,
972 const SDLoc &dl, SelectionDAG &DAG,
973 SmallVectorImpl<SDValue> &InVals) {
974 SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
975 // Copy results out of physical registers.
976 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
977 const CCValAssign &VA = RVLocs[i];
978 if (VA.isRegLoc()) {
979 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
980 InGlue).getValue(1);
981 InGlue = Chain.getValue(2);
982 InVals.push_back(Chain.getValue(0));
983 } else {
984 assert(VA.isMemLoc());
985 ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
986 InVals.size()));
987 // Reserve space for this result.
988 InVals.push_back(SDValue());
989 }
990 }
991
992 // Copy results out of memory.
993 SmallVector<SDValue, 4> MemOpChains;
994 for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
995 int offset = ResultMemLocs[i].first;
996 unsigned index = ResultMemLocs[i].second;
997 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
998 SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
999 SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1000 InVals[index] = load;
1001 MemOpChains.push_back(load.getValue(1));
1002 }
1003
1004 // Transform all loads nodes into one single node because
1005 // all load nodes are independent of each other.
1006 if (!MemOpChains.empty())
1007 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1008
1009 return Chain;
1010}
1011
1012/// LowerCCCCallTo - functions arguments are copied from virtual
1013/// regs to (physical regs)/(stack frame), CALLSEQ_START and
1014/// CALLSEQ_END are emitted.
1015/// TODO: isTailCall, sret.
1016SDValue XCoreTargetLowering::LowerCCCCallTo(
1017 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1018 bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1019 const SmallVectorImpl<SDValue> &OutVals,
1020 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1021 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1022
1023 // Analyze operands of the call, assigning locations to each operand.
1024 SmallVector<CCValAssign, 16> ArgLocs;
1025 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1026 *DAG.getContext());
1027
1028 // The ABI dictates there should be one stack slot available to the callee
1029 // on function entry (for saving lr).
1030 CCInfo.AllocateStack(4, Align(4));
1031
1032 CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1033
1034 SmallVector<CCValAssign, 16> RVLocs;
1035 // Analyze return values to determine the number of bytes of stack required.
1036 CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1037 *DAG.getContext());
1038 RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
1039 RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1040
1041 // Get a count of how many bytes are to be pushed on the stack.
1042 unsigned NumBytes = RetCCInfo.getStackSize();
1043
1044 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1045
1046 SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
1047 SmallVector<SDValue, 12> MemOpChains;
1048
1049 // Walk the register/memloc assignments, inserting copies/loads.
1050 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1051 CCValAssign &VA = ArgLocs[i];
1052 SDValue Arg = OutVals[i];
1053
1054 // Promote the value if needed.
1055 switch (VA.getLocInfo()) {
1056 default: llvm_unreachable("Unknown loc info!");
1057 case CCValAssign::Full: break;
1058 case CCValAssign::SExt:
1059 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1060 break;
1061 case CCValAssign::ZExt:
1062 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1063 break;
1064 case CCValAssign::AExt:
1065 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1066 break;
1067 }
1068
1069 // Arguments that can be passed on register must be kept at
1070 // RegsToPass vector
1071 if (VA.isRegLoc()) {
1072 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1073 } else {
1074 assert(VA.isMemLoc());
1075
1076 int Offset = VA.getLocMemOffset();
1077
1078 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1079 Chain, Arg,
1080 DAG.getConstant(Offset/4, dl,
1081 MVT::i32)));
1082 }
1083 }
1084
1085 // Transform all store nodes into one single node because
1086 // all store nodes are independent of each other.
1087 if (!MemOpChains.empty())
1088 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1089
1090 // Build a sequence of copy-to-reg nodes chained together with token
1091 // chain and flag operands which copy the outgoing args into registers.
1092 // The InGlue is necessary since all emitted instructions must be
1093 // stuck together.
1094 SDValue InGlue;
1095 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1096 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1097 RegsToPass[i].second, InGlue);
1098 InGlue = Chain.getValue(1);
1099 }
1100
1101 // If the callee is a GlobalAddress node (quite common, every direct call is)
1102 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1103 // Likewise ExternalSymbol -> TargetExternalSymbol.
1104 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1105 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1106 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1107 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1108
1109 // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1110 // = Chain, Callee, Reg#1, Reg#2, ...
1111 //
1112 // Returns a chain & a flag for retval copy to use.
1113 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1114 SmallVector<SDValue, 8> Ops;
1115 Ops.push_back(Chain);
1116 Ops.push_back(Callee);
1117
1118 // Add argument registers to the end of the list so that they are
1119 // known live into the call.
1120 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1121 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1122 RegsToPass[i].second.getValueType()));
1123
1124 if (InGlue.getNode())
1125 Ops.push_back(InGlue);
1126
1127 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1128 InGlue = Chain.getValue(1);
1129
1130 // Create the CALLSEQ_END node.
1131 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
1132 InGlue = Chain.getValue(1);
1133
1134 // Handle result values, copying them out of physregs into vregs that we
1135 // return.
1136 return LowerCallResult(Chain, InGlue, RVLocs, dl, DAG, InVals);
1137}
1138
1139//===----------------------------------------------------------------------===//
1140// Formal Arguments Calling Convention Implementation
1141//===----------------------------------------------------------------------===//
1142
1143namespace {
1144 struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1145}
1146
1147/// XCore formal arguments implementation
1148SDValue XCoreTargetLowering::LowerFormalArguments(
1149 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1150 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1151 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1152 switch (CallConv)
1153 {
1154 default:
1155 report_fatal_error("Unsupported calling convention");
1156 case CallingConv::C:
1157 case CallingConv::Fast:
1158 return LowerCCCArguments(Chain, CallConv, isVarArg,
1159 Ins, dl, DAG, InVals);
1160 }
1161}
1162
1163/// LowerCCCArguments - transform physical registers into
1164/// virtual registers and generate load operations for
1165/// arguments places on the stack.
1166/// TODO: sret
1167SDValue XCoreTargetLowering::LowerCCCArguments(
1168 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1169 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1170 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1171 MachineFunction &MF = DAG.getMachineFunction();
1172 MachineFrameInfo &MFI = MF.getFrameInfo();
1173 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1174 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1175
1176 // Assign locations to all of the incoming arguments.
1177 SmallVector<CCValAssign, 16> ArgLocs;
1178 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1179 *DAG.getContext());
1180
1181 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1182
1183 unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1184
1185 unsigned LRSaveSize = StackSlotSize;
1186
1187 if (!isVarArg)
1188 XFI->setReturnStackOffset(CCInfo.getStackSize() + LRSaveSize);
1189
1190 // All getCopyFromReg ops must precede any getMemcpys to prevent the
1191 // scheduler clobbering a register before it has been copied.
1192 // The stages are:
1193 // 1. CopyFromReg (and load) arg & vararg registers.
1194 // 2. Chain CopyFromReg nodes into a TokenFactor.
1195 // 3. Memcpy 'byVal' args & push final InVals.
1196 // 4. Chain mem ops nodes into a TokenFactor.
1197 SmallVector<SDValue, 4> CFRegNode;
1198 SmallVector<ArgDataPair, 4> ArgData;
1199 SmallVector<SDValue, 4> MemOps;
1200
1201 // 1a. CopyFromReg (and load) arg registers.
1202 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1203
1204 CCValAssign &VA = ArgLocs[i];
1205 SDValue ArgIn;
1206
1207 if (VA.isRegLoc()) {
1208 // Arguments passed in registers
1209 EVT RegVT = VA.getLocVT();
1210 switch (RegVT.getSimpleVT().SimpleTy) {
1211 default:
1212 {
1213#ifndef NDEBUG
1214 errs() << "LowerFormalArguments Unhandled argument type: "
1215 << RegVT << "\n";
1216#endif
1217 llvm_unreachable(nullptr);
1218 }
1219 case MVT::i32:
1220 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1221 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1222 ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1223 CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1224 }
1225 } else {
1226 // Only arguments passed on the stack should make it here.
1227 assert(VA.isMemLoc());
1228 // Load the argument to a virtual register
1229 unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1230 if (ObjSize > StackSlotSize) {
1231 errs() << "LowerFormalArguments Unhandled argument type: "
1232 << VA.getLocVT() << "\n";
1233 }
1234 // Create the frame index object for this incoming parameter...
1235 int FI = MFI.CreateFixedObject(ObjSize,
1236 LRSaveSize + VA.getLocMemOffset(),
1237 true);
1238
1239 // Create the SelectionDAG nodes corresponding to a load
1240 // from this parameter
1241 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1242 ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1243 MachinePointerInfo::getFixedStack(MF, FI));
1244 }
1245 const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1246 ArgData.push_back(ADP);
1247 }
1248
1249 // 1b. CopyFromReg vararg registers.
1250 if (isVarArg) {
1251 // Argument registers
1252 static const MCPhysReg ArgRegs[] = {
1253 XCore::R0, XCore::R1, XCore::R2, XCore::R3
1254 };
1256 unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1257 if (FirstVAReg < std::size(ArgRegs)) {
1258 int offset = 0;
1259 // Save remaining registers, storing higher register numbers at a higher
1260 // address
1261 for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1262 // Create a stack slot
1263 int FI = MFI.CreateFixedObject(4, offset, true);
1264 if (i == (int)FirstVAReg) {
1265 XFI->setVarArgsFrameIndex(FI);
1266 }
1267 offset -= StackSlotSize;
1268 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1269 // Move argument from phys reg -> virt reg
1270 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1271 RegInfo.addLiveIn(ArgRegs[i], VReg);
1272 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1273 CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1274 // Move argument from virt reg -> stack
1275 SDValue Store =
1276 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1277 MemOps.push_back(Store);
1278 }
1279 } else {
1280 // This will point to the next argument passed via stack.
1281 XFI->setVarArgsFrameIndex(
1282 MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));
1283 }
1284 }
1285
1286 // 2. chain CopyFromReg nodes into a TokenFactor.
1287 if (!CFRegNode.empty())
1288 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1289
1290 // 3. Memcpy 'byVal' args & push final InVals.
1291 // Aggregates passed "byVal" need to be copied by the callee.
1292 // The callee will use a pointer to this copy, rather than the original
1293 // pointer.
1294 for (const ArgDataPair &ArgDI : ArgData) {
1295 if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
1296 unsigned Size = ArgDI.Flags.getByValSize();
1297 Align Alignment =
1298 std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
1299 // Create a new object on the stack and copy the pointee into it.
1300 int FI = MFI.CreateStackObject(Size, Alignment, false);
1301 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1302 InVals.push_back(FIN);
1303 MemOps.push_back(DAG.getMemcpy(
1304 Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
1305 Alignment, false, false, false, MachinePointerInfo(),
1306 MachinePointerInfo()));
1307 } else {
1308 InVals.push_back(ArgDI.SDV);
1309 }
1310 }
1311
1312 // 4. Chain mem ops nodes into a TokenFactor.
1313 if (!MemOps.empty()) {
1314 MemOps.push_back(Chain);
1315 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1316 }
1317
1318 return Chain;
1319}
1320
1321//===----------------------------------------------------------------------===//
1322// Return Value Calling Convention Implementation
1323//===----------------------------------------------------------------------===//
1324
1325bool XCoreTargetLowering::
1326CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1327 bool isVarArg,
1328 const SmallVectorImpl<ISD::OutputArg> &Outs,
1329 LLVMContext &Context) const {
1330 SmallVector<CCValAssign, 16> RVLocs;
1331 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1332 if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1333 return false;
1334 if (CCInfo.getStackSize() != 0 && isVarArg)
1335 return false;
1336 return true;
1337}
1338
1339SDValue
1340XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1341 bool isVarArg,
1342 const SmallVectorImpl<ISD::OutputArg> &Outs,
1343 const SmallVectorImpl<SDValue> &OutVals,
1344 const SDLoc &dl, SelectionDAG &DAG) const {
1345
1346 XCoreFunctionInfo *XFI =
1347 DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1348 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1349
1350 // CCValAssign - represent the assignment of
1351 // the return value to a location
1352 SmallVector<CCValAssign, 16> RVLocs;
1353
1354 // CCState - Info about the registers and stack slot.
1355 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1356 *DAG.getContext());
1357
1358 // Analyze return values.
1359 if (!isVarArg)
1360 CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));
1361
1362 CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1363
1364 SDValue Glue;
1365 SmallVector<SDValue, 4> RetOps(1, Chain);
1366
1367 // Return on XCore is always a "retsp 0"
1368 RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1369
1370 SmallVector<SDValue, 4> MemOpChains;
1371 // Handle return values that must be copied to memory.
1372 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1373 CCValAssign &VA = RVLocs[i];
1374 if (VA.isRegLoc())
1375 continue;
1376 assert(VA.isMemLoc());
1377 if (isVarArg) {
1378 report_fatal_error("Can't return value from vararg function in memory");
1379 }
1380
1381 int Offset = VA.getLocMemOffset();
1382 unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1383 // Create the frame index object for the memory location.
1384 int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1385
1386 // Create a SelectionDAG node corresponding to a store
1387 // to this memory location.
1388 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1389 MemOpChains.push_back(DAG.getStore(
1390 Chain, dl, OutVals[i], FIN,
1391 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
1392 }
1393
1394 // Transform all store nodes into one single node because
1395 // all stores are independent of each other.
1396 if (!MemOpChains.empty())
1397 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1398
1399 // Now handle return values copied to registers.
1400 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1401 CCValAssign &VA = RVLocs[i];
1402 if (!VA.isRegLoc())
1403 continue;
1404 // Copy the result values into the output registers.
1405 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1406
1407 // guarantee that all emitted copies are
1408 // stuck together, avoiding something bad
1409 Glue = Chain.getValue(1);
1410 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1411 }
1412
1413 RetOps[0] = Chain; // Update chain.
1414
1415 // Add the glue if we have it.
1416 if (Glue.getNode())
1417 RetOps.push_back(Glue);
1418
1419 return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1420}
1421
1422//===----------------------------------------------------------------------===//
1423// Other Lowering Code
1424//===----------------------------------------------------------------------===//
1425
1426MachineBasicBlock *
1427XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1428 MachineBasicBlock *BB) const {
1429 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1430 DebugLoc dl = MI.getDebugLoc();
1431 assert((MI.getOpcode() == XCore::SELECT_CC) &&
1432 "Unexpected instr type to insert");
1433
1434 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1435 // control-flow pattern. The incoming instruction knows the destination vreg
1436 // to set, the condition code register to branch on, the true/false values to
1437 // select between, and a branch opcode to use.
1438 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1439 MachineFunction::iterator It = ++BB->getIterator();
1440
1441 // thisMBB:
1442 // ...
1443 // TrueVal = ...
1444 // cmpTY ccX, r1, r2
1445 // bCC copy1MBB
1446 // fallthrough --> copy0MBB
1447 MachineBasicBlock *thisMBB = BB;
1448 MachineFunction *F = BB->getParent();
1449 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1450 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1451 F->insert(It, copy0MBB);
1452 F->insert(It, sinkMBB);
1453
1454 // Transfer the remainder of BB and its successor edges to sinkMBB.
1455 sinkMBB->splice(sinkMBB->begin(), BB,
1456 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1457 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1458
1459 // Next, add the true and fallthrough blocks as its successors.
1460 BB->addSuccessor(copy0MBB);
1461 BB->addSuccessor(sinkMBB);
1462
1463 BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1464 .addReg(MI.getOperand(1).getReg())
1465 .addMBB(sinkMBB);
1466
1467 // copy0MBB:
1468 // %FalseValue = ...
1469 // # fallthrough to sinkMBB
1470 BB = copy0MBB;
1471
1472 // Update machine-CFG edges
1473 BB->addSuccessor(sinkMBB);
1474
1475 // sinkMBB:
1476 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1477 // ...
1478 BB = sinkMBB;
1479 BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1480 .addReg(MI.getOperand(3).getReg())
1481 .addMBB(copy0MBB)
1482 .addReg(MI.getOperand(2).getReg())
1483 .addMBB(thisMBB);
1484
1485 MI.eraseFromParent(); // The pseudo instruction is gone now.
1486 return BB;
1487}
1488
1489//===----------------------------------------------------------------------===//
1490// Target Optimization Hooks
1491//===----------------------------------------------------------------------===//
1492
1493SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1494 DAGCombinerInfo &DCI) const {
1495 SelectionDAG &DAG = DCI.DAG;
1496 SDLoc dl(N);
1497 switch (N->getOpcode()) {
1498 default: break;
1499 case ISD::INTRINSIC_VOID:
1500 switch (N->getConstantOperandVal(1)) {
1501 case Intrinsic::xcore_outt:
1502 case Intrinsic::xcore_outct:
1503 case Intrinsic::xcore_chkct: {
1504 SDValue OutVal = N->getOperand(3);
1505 // These instructions ignore the high bits.
1506 if (OutVal.hasOneUse()) {
1507 unsigned BitWidth = OutVal.getValueSizeInBits();
1508 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1509 KnownBits Known;
1510 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1511 !DCI.isBeforeLegalizeOps());
1512 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1513 if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1514 TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1515 DCI.CommitTargetLoweringOpt(TLO);
1516 }
1517 break;
1518 }
1519 case Intrinsic::xcore_setpt: {
1520 SDValue Time = N->getOperand(3);
1521 // This instruction ignores the high bits.
1522 if (Time.hasOneUse()) {
1523 unsigned BitWidth = Time.getValueSizeInBits();
1524 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1525 KnownBits Known;
1526 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1527 !DCI.isBeforeLegalizeOps());
1528 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1529 if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1530 TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1531 DCI.CommitTargetLoweringOpt(TLO);
1532 }
1533 break;
1534 }
1535 }
1536 break;
1537 case XCoreISD::LADD: {
1538 SDValue N0 = N->getOperand(0);
1539 SDValue N1 = N->getOperand(1);
1540 SDValue N2 = N->getOperand(2);
1541 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1542 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1543 EVT VT = N0.getValueType();
1544
1545 // canonicalize constant to RHS
1546 if (N0C && !N1C)
1547 return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1548
1549 // fold (ladd 0, 0, x) -> 0, x & 1
1550 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1551 SDValue Carry = DAG.getConstant(0, dl, VT);
1552 SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1553 DAG.getConstant(1, dl, VT));
1554 SDValue Ops[] = { Result, Carry };
1555 return DAG.getMergeValues(Ops, dl);
1556 }
1557
1558 // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1559 // low bit set
1560 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1561 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1562 VT.getSizeInBits() - 1);
1563 KnownBits Known = DAG.computeKnownBits(N2);
1564 if ((Known.Zero & Mask) == Mask) {
1565 SDValue Carry = DAG.getConstant(0, dl, VT);
1566 SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1567 SDValue Ops[] = { Result, Carry };
1568 return DAG.getMergeValues(Ops, dl);
1569 }
1570 }
1571 }
1572 break;
1573 case XCoreISD::LSUB: {
1574 SDValue N0 = N->getOperand(0);
1575 SDValue N1 = N->getOperand(1);
1576 SDValue N2 = N->getOperand(2);
1577 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1578 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1579 EVT VT = N0.getValueType();
1580
1581 // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1582 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1583 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1584 VT.getSizeInBits() - 1);
1585 KnownBits Known = DAG.computeKnownBits(N2);
1586 if ((Known.Zero & Mask) == Mask) {
1587 SDValue Borrow = N2;
1588 SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1589 DAG.getConstant(0, dl, VT), N2);
1590 SDValue Ops[] = { Result, Borrow };
1591 return DAG.getMergeValues(Ops, dl);
1592 }
1593 }
1594
1595 // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1596 // low bit set
1597 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1598 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1599 VT.getSizeInBits() - 1);
1600 KnownBits Known = DAG.computeKnownBits(N2);
1601 if ((Known.Zero & Mask) == Mask) {
1602 SDValue Borrow = DAG.getConstant(0, dl, VT);
1603 SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1604 SDValue Ops[] = { Result, Borrow };
1605 return DAG.getMergeValues(Ops, dl);
1606 }
1607 }
1608 }
1609 break;
1610 case XCoreISD::LMUL: {
1611 SDValue N0 = N->getOperand(0);
1612 SDValue N1 = N->getOperand(1);
1613 SDValue N2 = N->getOperand(2);
1614 SDValue N3 = N->getOperand(3);
1615 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1616 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1617 EVT VT = N0.getValueType();
1618 // Canonicalize multiplicative constant to RHS. If both multiplicative
1619 // operands are constant canonicalize smallest to RHS.
1620 if ((N0C && !N1C) ||
1621 (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1622 return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1623 N1, N0, N2, N3);
1624
1625 // lmul(x, 0, a, b)
1626 if (N1C && N1C->isZero()) {
1627 // If the high result is unused fold to add(a, b)
1628 if (N->hasNUsesOfValue(0, 0)) {
1629 SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1630 SDValue Ops[] = { Lo, Lo };
1631 return DAG.getMergeValues(Ops, dl);
1632 }
1633 // Otherwise fold to ladd(a, b, 0)
1634 SDValue Result =
1635 DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1636 SDValue Carry(Result.getNode(), 1);
1637 SDValue Ops[] = { Carry, Result };
1638 return DAG.getMergeValues(Ops, dl);
1639 }
1640 }
1641 break;
1642 case ISD::ADD: {
1643 // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1644 // lmul(x, y, a, b). The high result of lmul will be ignored.
1645 // This is only profitable if the intermediate results are unused
1646 // elsewhere.
1647 SDValue Mul0, Mul1, Addend0, Addend1;
1648 if (N->getValueType(0) == MVT::i32 &&
1649 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1650 SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1651 DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1652 Mul1, Addend0, Addend1);
1653 SDValue Result(Ignored.getNode(), 1);
1654 return Result;
1655 }
1656 APInt HighMask = APInt::getHighBitsSet(64, 32);
1657 // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1658 // lmul(x, y, a, b) if all operands are zero-extended. We do this
1659 // before type legalization as it is messy to match the operands after
1660 // that.
1661 if (N->getValueType(0) == MVT::i64 &&
1662 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1663 DAG.MaskedValueIsZero(Mul0, HighMask) &&
1664 DAG.MaskedValueIsZero(Mul1, HighMask) &&
1665 DAG.MaskedValueIsZero(Addend0, HighMask) &&
1666 DAG.MaskedValueIsZero(Addend1, HighMask)) {
1667 SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1668 Mul0, DAG.getConstant(0, dl, MVT::i32));
1669 SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1670 Mul1, DAG.getConstant(0, dl, MVT::i32));
1671 SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1672 Addend0, DAG.getConstant(0, dl, MVT::i32));
1673 SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1674 Addend1, DAG.getConstant(0, dl, MVT::i32));
1675 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1676 DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1677 Addend0L, Addend1L);
1678 SDValue Lo(Hi.getNode(), 1);
1679 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1680 }
1681 }
1682 break;
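// Illustrative note (not part of the upstream source): the ADD combine maps
//   add(add(mul(x, y), a), b)  ->  lmul(x, y, a, b)
// via the isADDADDMUL matcher. For i32 only the low half (result 1) of the
// LMUL is used. For i64 every operand must be a provably zero-extended
// 32-bit value; the low halves are extracted with EXTRACT_ELEMENT and the
// (hi, lo) halves are reassembled into an i64 with BUILD_PAIR.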
1683 case ISD::STORE: {
1684 // Replace unaligned store of unaligned load with memmove.
1685 StoreSDNode *ST = cast<StoreSDNode>(N);
1686 if (!DCI.isBeforeLegalize() ||
1687 allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1688 ST->getMemoryVT(),
1689 *ST->getMemOperand()) ||
1690 ST->isVolatile() || ST->isIndexed()) {
1691 break;
1692 }
1693 SDValue Chain = ST->getChain();
1694
1695 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1696 assert((StoreBits % 8) == 0 &&
1697 "Store size in bits must be a multiple of 8");
1698 Align Alignment = ST->getAlign();
1699
1700 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1701 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1702 LD->getAlign() == Alignment &&
1703 !LD->isVolatile() && !LD->isIndexed() &&
1704 Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1705 bool isTail = isInTailCallPosition(DAG, ST, Chain);
1706 return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
1707 DAG.getConstant(StoreBits / 8, dl, MVT::i32),
1708 Alignment, false, isTail,
1709 ST->getPointerInfo(), LD->getPointerInfo());
1710 }
1711 }
1712 break;
1713 }
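// Illustrative note (not part of the upstream source): the STORE combine only
// fires before legalization, for a misaligned, non-volatile, non-indexed
// store whose value is a single-use load of the same memory type and
// alignment, with no side-effecting nodes between the load and the store on
// the chain. The load/store pair is then replaced by a memmove of
// StoreBits / 8 bytes between the two pointers.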
1714 }
1715 return SDValue();
1716}
1717
1718void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1719 KnownBits &Known,
1720 const APInt &DemandedElts,
1721 const SelectionDAG &DAG,
1722 unsigned Depth) const {
1723 Known.resetAll();
1724 switch (Op.getOpcode()) {
1725 default: break;
1726 case XCoreISD::LADD:
1727 case XCoreISD::LSUB:
1728 if (Op.getResNo() == 1) {
1729 // Top bits of carry / borrow are clear.
1730 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1731 Known.getBitWidth() - 1);
1732 }
1733 break;
1734 case ISD::INTRINSIC_W_CHAIN:
1735 {
1736 unsigned IntNo = Op.getConstantOperandVal(1);
1737 switch (IntNo) {
1738 case Intrinsic::xcore_getts:
1739 // High bits are known to be zero.
1740 Known.Zero =
1741 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 16);
1742 break;
1743 case Intrinsic::xcore_int:
1744 case Intrinsic::xcore_inct:
1745 // High bits are known to be zero.
1746 Known.Zero =
1747 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 8);
1748 break;
1749 case Intrinsic::xcore_testct:
1750 // Result is either 0 or 1.
1751 Known.Zero =
1752 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 1);
1753 break;
1754 case Intrinsic::xcore_testwct:
1755 // Result is in the range 0 - 4.
1756 Known.Zero =
1757 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 3);
1758 break;
1759 }
1760 }
1761 break;
1762 }
1763}
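// Worked example (illustrative, not part of the upstream source): for an i32
// XCoreISD::LADD node, the carry result (ResNo == 1) receives
//   Known.Zero = APInt::getHighBitsSet(32, 31) == 0xFFFFFFFE
// so the combiner may assume the carry is either 0 or 1, which is exactly
// the precondition exploited by the LADD/LSUB folds in the DAG combine above.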
1764
1765//===----------------------------------------------------------------------===//
1766// Addressing mode description hooks
1767//===----------------------------------------------------------------------===//
1768
1769static inline bool isImmUs(int64_t val)
1770{
1771 return (val >= 0 && val <= 11);
1772}
1773
1774static inline bool isImmUs2(int64_t val)
1775{
1776 return (val%2 == 0 && isImmUs(val/2));
1777}
1778
1779static inline bool isImmUs4(int64_t val)
1780{
1781 return (val%4 == 0 && isImmUs(val/4));
1782}
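// Illustrative values (not part of the upstream source): the scaled unsigned
// immediate predicates above accept the following byte offsets:
//   isImmUs(val)  : 0, 1, ..., 11    (unscaled)
//   isImmUs2(val) : 0, 2, ..., 22    (val/2 in 0..11, 16-bit accesses)
//   isImmUs4(val) : 0, 4, ..., 44    (val/4 in 0..11, word accesses)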
1783
1784/// isLegalAddressingMode - Return true if the addressing mode represented
1785 /// by AM is legal for this target, for a load/store of the specified type.
1786 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1787 const AddrMode &AM, Type *Ty,
1788 unsigned AS,
1789 Instruction *I) const {
1790 if (Ty->getTypeID() == Type::VoidTyID)
1791 return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1792
1793 unsigned Size = DL.getTypeAllocSize(Ty);
1794 if (AM.BaseGV) {
1795 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1796 AM.BaseOffs%4 == 0;
1797 }
1798
1799 switch (Size) {
1800 case 1:
1801 // reg + imm
1802 if (AM.Scale == 0) {
1803 return isImmUs(AM.BaseOffs);
1804 }
1805 // reg + reg
1806 return AM.Scale == 1 && AM.BaseOffs == 0;
1807 case 2:
1808 case 3:
1809 // reg + imm
1810 if (AM.Scale == 0) {
1811 return isImmUs2(AM.BaseOffs);
1812 }
1813 // reg + reg<<1
1814 return AM.Scale == 2 && AM.BaseOffs == 0;
1815 default:
1816 // reg + imm
1817 if (AM.Scale == 0) {
1818 return isImmUs4(AM.BaseOffs);
1819 }
1820 // reg + reg<<2
1821 return AM.Scale == 4 && AM.BaseOffs == 0;
1822 }
1823}
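// Worked examples (illustrative, not part of the upstream source): under the
// rules above a 4-byte access is legal with either reg + imm where imm is in
// {0, 4, ..., 44} (Scale == 0 and isImmUs4(BaseOffs)), or reg + reg<<2
// (Scale == 4 and BaseOffs == 0). A global-based access additionally needs a
// size of at least 4 bytes, no base register, zero scale, and a word-aligned
// offset.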
1824
1825//===----------------------------------------------------------------------===//
1826// XCore Inline Assembly Support
1827//===----------------------------------------------------------------------===//
1828
1829std::pair<unsigned, const TargetRegisterClass *>
1830XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1831 StringRef Constraint,
1832 MVT VT) const {
1833 if (Constraint.size() == 1) {
1834 switch (Constraint[0]) {
1835 default : break;
1836 case 'r':
1837 return std::make_pair(0U, &XCore::GRRegsRegClass);
1838 }
1839 }
1840 // Use the default implementation in TargetLowering to convert the register
1841 // constraint into a member of a register class.
1842 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1843}
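// Usage sketch (illustrative, hypothetical source fragment): a single-letter
// 'r' constraint in XCore inline assembly, e.g.
//   int in = 5, out;
//   asm("add %0, %1, %1" : "=r"(out) : "r"(in));
// is mapped by the hook above to the general-purpose register class
// XCore::GRRegsRegClass; any other constraint falls back to the default
// TargetLowering handling.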