1//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the XCoreTargetLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "XCoreISelLowering.h"
14#include "XCore.h"
16#include "XCoreSubtarget.h"
17#include "XCoreTargetMachine.h"
26#include "llvm/IR/CallingConv.h"
27#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalAlias.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/IntrinsicsXCore.h"
34#include "llvm/Support/Debug.h"
38#include <algorithm>
39
40using namespace llvm;
41
42#define DEBUG_TYPE "xcore-lower"
43
44const char *XCoreTargetLowering::
45getTargetNodeName(unsigned Opcode) const
46{
47 switch ((XCoreISD::NodeType)Opcode)
48 {
49 case XCoreISD::FIRST_NUMBER : break;
50 case XCoreISD::BL : return "XCoreISD::BL";
51 case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
52 case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
53 case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
54 case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
55 case XCoreISD::STWSP : return "XCoreISD::STWSP";
56 case XCoreISD::RETSP : return "XCoreISD::RETSP";
57 case XCoreISD::LADD : return "XCoreISD::LADD";
58 case XCoreISD::LSUB : return "XCoreISD::LSUB";
59 case XCoreISD::LMUL : return "XCoreISD::LMUL";
60 case XCoreISD::MACCU : return "XCoreISD::MACCU";
61 case XCoreISD::MACCS : return "XCoreISD::MACCS";
62 case XCoreISD::CRC8 : return "XCoreISD::CRC8";
63 case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
64 case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
65 case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
66 case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
67 }
68 return nullptr;
69}
70
71XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
72 const XCoreSubtarget &Subtarget)
73 : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
74
75 // Set up the register classes.
76 addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
77
78 // Compute derived properties from the register classes
79 computeRegisterProperties(Subtarget.getRegisterInfo());
80
81 setStackPointerRegisterToSaveRestore(XCore::SP);
82
83 setSchedulingPreference(Sched::Source);
84
85 // Use i32 for setcc operations results (slt, sgt, ...).
86 setBooleanContents(ZeroOrOneBooleanContent);
87 setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
88
89 // XCore does not have the NodeTypes below.
92
93 // 64bit
103
104 // Bit Manipulation
109
110 setOperationAction(ISD::TRAP, MVT::Other, Legal);
111
112 // Jump tables.
114
117
118 // Conversion of i64 -> double produces constantpool nodes
120
121 // Loads
122 for (MVT VT : MVT::integer_valuetypes()) {
126
128 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
129 }
130
131 // Custom expand misaligned loads / stores.
134
135 // Varargs
140
141 // Dynamic stack
145
146 // Exception handling
149
151
152 // TRAMPOLINE is custom lowered.
155
156 // We want to custom lower some of our intrinsics.
158
162
163 // We have target-specific dag combine patterns for the following nodes:
166
169
170 // This target doesn't implement native atomics.
172}
173
174bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
175 if (Val.getOpcode() != ISD::LOAD)
176 return false;
177
178 EVT VT1 = Val.getValueType();
179 if (!VT1.isSimple() || !VT1.isInteger() ||
180 !VT2.isSimple() || !VT2.isInteger())
181 return false;
182
183 switch (VT1.getSimpleVT().SimpleTy) {
184 default: break;
185 case MVT::i8:
186 return true;
187 }
188
189 return false;
190}
191
192SDValue XCoreTargetLowering::
193LowerOperation(SDValue Op, SelectionDAG &DAG) const {
194 switch (Op.getOpcode())
195 {
196 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
197 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
198 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
199 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
200 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
201 case ISD::LOAD: return LowerLOAD(Op, DAG);
202 case ISD::STORE: return LowerSTORE(Op, DAG);
203 case ISD::VAARG: return LowerVAARG(Op, DAG);
204 case ISD::VASTART: return LowerVASTART(Op, DAG);
205 case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
206 case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
207 // FIXME: Remove these when LegalizeDAGTypes lands.
208 case ISD::ADD:
209 case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
210 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
211 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
212 case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
213 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
214 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
215 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
216 case ISD::ATOMIC_FENCE:
217 return LowerATOMIC_FENCE(Op, DAG);
218 default:
219 llvm_unreachable("unimplemented operand");
220 }
221}
222
223/// ReplaceNodeResults - Replace the results of node with an illegal result
224/// type with new values built out of custom code.
225void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
226 SmallVectorImpl<SDValue> &Results,
227 SelectionDAG &DAG) const {
228 switch (N->getOpcode()) {
229 default:
230 llvm_unreachable("Don't know how to custom expand this!");
231 case ISD::ADD:
232 case ISD::SUB:
233 Results.push_back(ExpandADDSUB(N, DAG));
234 return;
235 }
236}
237
238//===----------------------------------------------------------------------===//
239// Misc Lower Operation implementation
240//===----------------------------------------------------------------------===//
241
242SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
243 const GlobalValue *GV,
244 SelectionDAG &DAG) const {
245 // FIXME there is no actual debug info here
246 SDLoc dl(GA);
247
248 if (GV->getValueType()->isFunctionTy())
249 return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
250
251 const auto *GVar = dyn_cast<GlobalVariable>(GV);
252 if ((GV->hasSection() && GV->getSection().starts_with(".cp.")) ||
253 (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
254 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
255
256 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
257}
258
259static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
260 if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
261 return true;
262
263 Type *ObjType = GV->getValueType();
264 if (!ObjType->isSized())
265 return false;
266
267 auto &DL = GV->getDataLayout();
268 unsigned ObjSize = DL.getTypeAllocSize(ObjType);
269 return ObjSize < CodeModelLargeSize && ObjSize != 0;
270}
271
272SDValue XCoreTargetLowering::
273LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
274{
275 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
276 const GlobalValue *GV = GN->getGlobal();
277 SDLoc DL(GN);
278 int64_t Offset = GN->getOffset();
279 if (IsSmallObject(GV, *this)) {
280 // We can only fold positive offsets that are a multiple of the word size.
281 int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
282 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
283 GA = getGlobalAddressWrapper(GA, GV, DAG);
284 // Handle the rest of the offset.
285 if (Offset != FoldedOffset) {
286 SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
287 GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
288 }
289 return GA;
290 } else {
291 // Ideally we would not fold in offset with an index <= 11.
292 Type *Ty = Type::getInt32Ty(*DAG.getContext());
293 Constant *Idx = ConstantInt::get(Ty, Offset);
294 Constant *GAI = ConstantExpr::getGetElementPtr(
295 Type::getInt8Ty(*DAG.getContext()), const_cast<GlobalValue *>(GV), Idx);
296 SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
297 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
298 DAG.getEntryNode(), CP, MachinePointerInfo());
299 }
300}
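// Illustrative note derived from the code above (not part of the lowering):
// for a small object with Offset = 6, FoldedOffset = max(6 & ~3, 0) = 4, so
// the wrapper is built over GV+4 and the remaining 2 bytes are added with an
// explicit ADD. A negative or non-word-multiple offset folds to 0 and is
// applied entirely outside the wrapper.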
301
302SDValue XCoreTargetLowering::
303LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
304{
305 SDLoc DL(Op);
306 auto PtrVT = getPointerTy(DAG.getDataLayout());
307 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
308 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
309
310 return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
311}
312
313SDValue XCoreTargetLowering::
314LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
315{
316 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
317 // FIXME there isn't really debug info here
318 SDLoc dl(CP);
319 EVT PtrVT = Op.getValueType();
320 SDValue Res;
321 if (CP->isMachineConstantPoolEntry()) {
322 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
323 CP->getAlign(), CP->getOffset());
324 } else {
325 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
326 CP->getOffset());
327 }
328 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
329}
330
331unsigned XCoreTargetLowering::getJumpTableEncoding() const {
332 return MachineJumpTableInfo::EK_Inline;
333}
334
335SDValue XCoreTargetLowering::
336LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
337{
338 SDValue Chain = Op.getOperand(0);
339 SDValue Table = Op.getOperand(1);
340 SDValue Index = Op.getOperand(2);
341 SDLoc dl(Op);
342 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
343 unsigned JTI = JT->getIndex();
344 MachineFunction &MF = DAG.getMachineFunction();
345 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
346 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
347
348 unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
349 if (NumEntries <= 32) {
350 return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
351 }
352 assert((NumEntries >> 31) == 0);
353 SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
354 DAG.getConstant(1, dl, MVT::i32));
355 return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
356 ScaledIndex);
357}
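// Note on the scaling above (a reading of the code; the entry-size rationale
// is an assumption): tables with at most 32 entries are dispatched directly
// via BR_JT, while larger tables double the index (index << 1) before
// BR_JT32, presumably because each entry of the large-table form occupies two
// 16-bit slots. For example, index 5 into a 40-entry table becomes scaled
// index 10.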
358
359SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
360 const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
361 SelectionDAG &DAG) const {
362 auto PtrVT = getPointerTy(DAG.getDataLayout());
363 if ((Offset & 0x3) == 0) {
364 return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
365 }
366 // Lower to pair of consecutive word aligned loads plus some bit shifting.
367 int32_t HighOffset = alignTo(Offset, 4);
368 int32_t LowOffset = HighOffset - 4;
369 SDValue LowAddr, HighAddr;
370 if (GlobalAddressSDNode *GASD =
371 dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
372 LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
373 LowOffset);
374 HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
375 HighOffset);
376 } else {
377 LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
378 DAG.getConstant(LowOffset, DL, MVT::i32));
379 HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
380 DAG.getConstant(HighOffset, DL, MVT::i32));
381 }
382 SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
383 SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
384
385 SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
386 SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
387 SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
388 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
389 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
390 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
391 High.getValue(1));
392 SDValue Ops[] = { Result, Chain };
393 return DAG.getMergeValues(Ops, DL);
394}
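// Worked example (illustrative only): loading a word from Base + 6 gives
//   LowOffset = 4, HighOffset = 8
//   LowShift  = (6 - 4) * 8 = 16, HighShift = (8 - 6) * 8 = 16
//   Result    = (load [Base+4] >> 16) | (load [Base+8] << 16)
// i.e. the misaligned word is reassembled from the two aligned words that
// straddle it.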
395
396static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
397{
398 KnownBits Known = DAG.computeKnownBits(Value);
399 return Known.countMinTrailingZeros() >= 2;
400}
401
402SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
403 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
404 LLVMContext &Context = *DAG.getContext();
405 LoadSDNode *LD = cast<LoadSDNode>(Op);
406 assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
407 "Unexpected extension type");
408 assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
409
410 if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
411 LD->getMemoryVT(), *LD->getMemOperand()))
412 return SDValue();
413
414 SDValue Chain = LD->getChain();
415 SDValue BasePtr = LD->getBasePtr();
416 SDLoc DL(Op);
417
418 if (!LD->isVolatile()) {
419 const GlobalValue *GV;
420 int64_t Offset = 0;
421 if (DAG.isBaseWithConstantOffset(BasePtr) &&
422 isWordAligned(BasePtr->getOperand(0), DAG)) {
423 SDValue NewBasePtr = BasePtr->getOperand(0);
424 Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
425 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
426 Offset, DAG);
427 }
428 if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
429 GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
430 SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
431 BasePtr->getValueType(0));
432 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
433 Offset, DAG);
434 }
435 }
436
437 if (LD->getAlign() == Align(2)) {
438 SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
439 LD->getPointerInfo(), MVT::i16, Align(2),
440 LD->getMemOperand()->getFlags());
441 SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
442 DAG.getConstant(2, DL, MVT::i32));
443 SDValue High =
444 DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
445 LD->getPointerInfo().getWithOffset(2), MVT::i16,
446 Align(2), LD->getMemOperand()->getFlags());
447 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
448 DAG.getConstant(16, DL, MVT::i32));
449 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
450 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
451 High.getValue(1));
452 SDValue Ops[] = { Result, Chain };
453 return DAG.getMergeValues(Ops, DL);
454 }
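 // Illustrative note: for the 2-byte-aligned case above the word is rebuilt
 // from its two halfwords, little-endian style:
 //   Result = zext16(load16 [BasePtr]) | (load16 [BasePtr+2] << 16)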
455
456 // Lower to a call to __misaligned_load(BasePtr).
457 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
458 TargetLowering::ArgListTy Args;
459 TargetLowering::ArgListEntry Entry;
460
461 Entry.Ty = IntPtrTy;
462 Entry.Node = BasePtr;
463 Args.push_back(Entry);
464
465 TargetLowering::CallLoweringInfo CLI(DAG);
466 CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
467 CallingConv::C, IntPtrTy,
468 DAG.getExternalSymbol("__misaligned_load",
469 getPointerTy(DAG.getDataLayout())),
470 std::move(Args));
471
472 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
473 SDValue Ops[] = { CallResult.first, CallResult.second };
474 return DAG.getMergeValues(Ops, DL);
475}
476
477SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
478 LLVMContext &Context = *DAG.getContext();
479 StoreSDNode *ST = cast<StoreSDNode>(Op);
480 assert(!ST->isTruncatingStore() && "Unexpected store type");
481 assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
482
483 if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
484 ST->getMemoryVT(), *ST->getMemOperand()))
485 return SDValue();
486
487 SDValue Chain = ST->getChain();
488 SDValue BasePtr = ST->getBasePtr();
489 SDValue Value = ST->getValue();
490 SDLoc dl(Op);
491
492 if (ST->getAlign() == Align(2)) {
493 SDValue Low = Value;
494 SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
495 DAG.getConstant(16, dl, MVT::i32));
496 SDValue StoreLow =
497 DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
498 MVT::i16, Align(2), ST->getMemOperand()->getFlags());
499 SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
500 DAG.getConstant(2, dl, MVT::i32));
501 SDValue StoreHigh = DAG.getTruncStore(
502 Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
503 MVT::i16, Align(2), ST->getMemOperand()->getFlags());
504 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
505 }
506
507 // Lower to a call to __misaligned_store(BasePtr, Value).
508 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
509 TargetLowering::ArgListTy Args;
510 TargetLowering::ArgListEntry Entry;
511
512 Entry.Ty = IntPtrTy;
513 Entry.Node = BasePtr;
514 Args.push_back(Entry);
515
516 Entry.Node = Value;
517 Args.push_back(Entry);
518
519 TargetLowering::CallLoweringInfo CLI(DAG);
520 CLI.setDebugLoc(dl).setChain(Chain).setCallee(
521 CallingConv::C, Type::getVoidTy(Context),
522 DAG.getExternalSymbol("__misaligned_store",
523 getPointerTy(DAG.getDataLayout())),
524 std::move(Args));
525
526 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
527 return CallResult.second;
528}
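// Illustrative note: the aligned-to-2 store above mirrors the load case,
// splitting the word into halfwords:
//   store16 [BasePtr]   = Value & 0xffff
//   store16 [BasePtr+2] = Value >> 16
// Anything with weaker alignment falls back to the __misaligned_store
// libcall, just as loads fall back to __misaligned_load.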
529
530SDValue XCoreTargetLowering::
531LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
532{
533 assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
534 "Unexpected operand to lower!");
535 SDLoc dl(Op);
536 SDValue LHS = Op.getOperand(0);
537 SDValue RHS = Op.getOperand(1);
538 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
539 SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
540 DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
541 LHS, RHS);
542 SDValue Lo(Hi.getNode(), 1);
543 SDValue Ops[] = { Lo, Hi };
544 return DAG.getMergeValues(Ops, dl);
545}
546
547SDValue XCoreTargetLowering::
548LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
549{
550 assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
551 "Unexpected operand to lower!");
552 SDLoc dl(Op);
553 SDValue LHS = Op.getOperand(0);
554 SDValue RHS = Op.getOperand(1);
555 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
556 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
557 DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
558 Zero, Zero);
559 SDValue Lo(Hi.getNode(), 1);
560 SDValue Ops[] = { Lo, Hi };
561 return DAG.getMergeValues(Ops, dl);
562}
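// Note (a reading of the two lowerings above): both feed zero addends into
// the multiply-accumulate style nodes, so the {Hi, Lo} result pair is simply
// the full 64-bit product of the 32-bit operands: MACCS(0, 0, x, y) for the
// signed form and LMUL(x, y, 0, 0) for the unsigned form.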
563
564/// isADDADDMUL - Return whether Op is in a form that is equivalent to
565/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
566/// each intermediate result in the calculation must also have a single use.
567/// If the Op is in the correct form the constituent parts are written to Mul0,
568/// Mul1, Addend0 and Addend1.
569static bool
570isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
571 SDValue &Addend1, bool requireIntermediatesHaveOneUse)
572{
573 if (Op.getOpcode() != ISD::ADD)
574 return false;
575 SDValue N0 = Op.getOperand(0);
576 SDValue N1 = Op.getOperand(1);
577 SDValue AddOp;
578 SDValue OtherOp;
579 if (N0.getOpcode() == ISD::ADD) {
580 AddOp = N0;
581 OtherOp = N1;
582 } else if (N1.getOpcode() == ISD::ADD) {
583 AddOp = N1;
584 OtherOp = N0;
585 } else {
586 return false;
587 }
588 if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
589 return false;
590 if (OtherOp.getOpcode() == ISD::MUL) {
591 // add(add(a,b),mul(x,y))
592 if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
593 return false;
594 Mul0 = OtherOp.getOperand(0);
595 Mul1 = OtherOp.getOperand(1);
596 Addend0 = AddOp.getOperand(0);
597 Addend1 = AddOp.getOperand(1);
598 return true;
599 }
600 if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
601 // add(add(mul(x,y),a),b)
602 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
603 return false;
604 Mul0 = AddOp.getOperand(0).getOperand(0);
605 Mul1 = AddOp.getOperand(0).getOperand(1);
606 Addend0 = AddOp.getOperand(1);
607 Addend1 = OtherOp;
608 return true;
609 }
610 if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
611 // add(add(a,mul(x,y)),b)
612 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
613 return false;
614 Mul0 = AddOp.getOperand(1).getOperand(0);
615 Mul1 = AddOp.getOperand(1).getOperand(1);
616 Addend0 = AddOp.getOperand(0);
617 Addend1 = OtherOp;
618 return true;
619 }
620 return false;
621}
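// Example of the matching above (illustrative): for a DAG computing
// add(add(mul(x, y), a), b) the helper yields
//   Mul0 = x, Mul1 = y, Addend0 = a, Addend1 = b
// and with requireIntermediatesHaveOneUse set, the inner add and the mul must
// each have a single use, so the rewrite cannot drop values shared elsewhere.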
622
623SDValue XCoreTargetLowering::
624TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
625{
626 SDValue Mul;
627 SDValue Other;
628 if (N->getOperand(0).getOpcode() == ISD::MUL) {
629 Mul = N->getOperand(0);
630 Other = N->getOperand(1);
631 } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
632 Mul = N->getOperand(1);
633 Other = N->getOperand(0);
634 } else {
635 return SDValue();
636 }
637 SDLoc dl(N);
638 SDValue LL, RL, AddendL, AddendH;
639 LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
640 Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
641 RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
642 Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
643 AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
644 Other, DAG.getConstant(0, dl, MVT::i32));
645 AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
646 Other, DAG.getConstant(1, dl, MVT::i32));
647 APInt HighMask = APInt::getHighBitsSet(64, 32);
648 unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
649 unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
650 if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
651 DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
652 // The inputs are both zero-extended.
653 SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
654 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
655 AddendL, LL, RL);
656 SDValue Lo(Hi.getNode(), 1);
657 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
658 }
659 if (LHSSB > 32 && RHSSB > 32) {
660 // The inputs are both sign-extended.
661 SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
662 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
663 AddendL, LL, RL);
664 SDValue Lo(Hi.getNode(), 1);
665 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
666 }
667 SDValue LH, RH;
668 LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
669 Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
670 RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
671 Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
672 SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
673 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
674 AddendL, LL, RL);
675 SDValue Lo(Hi.getNode(), 1);
676 RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
677 LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
678 Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
679 Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
680 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
681}
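// Sketch of the arithmetic above (illustrative): splitting the multiplicands
// as A = AH*2^32 + AL and B = BH*2^32 + BL, with Other = {AddendH:AddendL},
//   A*B + Other = {AddendH:AddendL} + AL*BL       // one multiply-accumulate
//                 + ((AL*BH + AH*BL) << 32)       // cross terms, high word only
// The AH*BH term lies entirely above bit 63 and is dropped. When both inputs
// are zero-extended the cross terms are zero, and when both are sign-extended
// the signed multiply-accumulate already yields the full product, hence the
// two early returns.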
682
683SDValue XCoreTargetLowering::
684ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
685{
686 assert(N->getValueType(0) == MVT::i64 &&
687 (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
688 "Unknown operand to lower!");
689
690 if (N->getOpcode() == ISD::ADD)
691 if (SDValue Result = TryExpandADDWithMul(N, DAG))
692 return Result;
693
694 SDLoc dl(N);
695
696 // Extract components
697 SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
698 N->getOperand(0),
699 DAG.getConstant(0, dl, MVT::i32));
700 SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
701 N->getOperand(0),
702 DAG.getConstant(1, dl, MVT::i32));
703 SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
704 N->getOperand(1),
705 DAG.getConstant(0, dl, MVT::i32));
706 SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
707 N->getOperand(1),
708 DAG.getConstant(1, dl, MVT::i32));
709
710 // Expand
711 unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
712 XCoreISD::LSUB;
713 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
714 SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
715 LHSL, RHSL, Zero);
716 SDValue Carry(Lo.getNode(), 1);
717
718 SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
719 LHSH, RHSH, Carry);
720 SDValue Ignored(Hi.getNode(), 1);
721 // Merge the pieces
722 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
723}
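// Illustrative example: an i64 addition a + b expands to
//   Lo = LADD(a.lo, b.lo, 0)       // result #1 of this node is the carry
//   Hi = LADD(a.hi, b.hi, carry)
//   BUILD_PAIR(Lo, Hi)
// with LSUB chaining the borrow in exactly the same way for i64 subtraction.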
724
725SDValue XCoreTargetLowering::
726LowerVAARG(SDValue Op, SelectionDAG &DAG) const
727{
728 // Whilst LLVM does not support aggregate varargs we can ignore
729 // the possibility of the ValueType being an implicit byVal vararg.
730 SDNode *Node = Op.getNode();
731 EVT VT = Node->getValueType(0); // not an aggregate
732 SDValue InChain = Node->getOperand(0);
733 SDValue VAListPtr = Node->getOperand(1);
734 EVT PtrVT = VAListPtr.getValueType();
735 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
736 SDLoc dl(Node);
737 SDValue VAList =
738 DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
739 // Increment the pointer, VAList, to the next vararg
740 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
741 DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
742 dl));
743 // Store the incremented VAList to the legalized pointer
744 InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
745 MachinePointerInfo(SV));
746 // Load the actual argument out of the pointer VAList
747 return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
748}
749
750SDValue XCoreTargetLowering::
751LowerVASTART(SDValue Op, SelectionDAG &DAG) const
752{
753 SDLoc dl(Op);
754 // vastart stores the address of the VarArgsFrameIndex slot into the
755 // memory location argument
756 MachineFunction &MF = DAG.getMachineFunction();
757 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
758 SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
759 return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
760 MachinePointerInfo());
761}
762
763SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
764 SelectionDAG &DAG) const {
765 // This node represents llvm.frameaddress on the DAG.
766 // It takes one operand, the index of the frame address to return.
767 // An index of zero corresponds to the current function's frame address.
768 // An index of one to the parent's frame address, and so on.
769 // Depths > 0 not supported yet!
770 if (Op.getConstantOperandVal(0) > 0)
771 return SDValue();
772
773 MachineFunction &MF = DAG.getMachineFunction();
774 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
775 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
776 RegInfo->getFrameRegister(MF), MVT::i32);
777}
778
779SDValue XCoreTargetLowering::
780LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
781 // This node represents llvm.returnaddress on the DAG.
782 // It takes one operand, the index of the return address to return.
783 // An index of zero corresponds to the current function's return address.
784 // An index of one to the parent's return address, and so on.
785 // Depths > 0 not supported yet!
786 if (Op.getConstantOperandVal(0) > 0)
787 return SDValue();
788
789 MachineFunction &MF = DAG.getMachineFunction();
790 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
791 int FI = XFI->createLRSpillSlot(MF);
792 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
793 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
794 DAG.getEntryNode(), FIN,
795 MachinePointerInfo::getFixedStack(MF, FI));
796}
797
798SDValue XCoreTargetLowering::
799LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
800 // This node represents offset from frame pointer to first on-stack argument.
801 // This is needed for correct stack adjustment during unwind.
802 // However, we don't know the offset until after the frame has been finalised.
803 // This is done during the XCoreFTAOElim pass.
804 return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
805}
806
807SDValue XCoreTargetLowering::
808LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
809 // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
810 // This node represents 'eh_return' gcc dwarf builtin, which is used to
811 // return from exception. The general meaning is: adjust stack by OFFSET and
812 // pass execution to HANDLER.
813 MachineFunction &MF = DAG.getMachineFunction();
814 SDValue Chain = Op.getOperand(0);
815 SDValue Offset = Op.getOperand(1);
816 SDValue Handler = Op.getOperand(2);
817 SDLoc dl(Op);
818
819 // Absolute SP = (FP + FrameToArgs) + Offset
820 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
821 SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
822 RegInfo->getFrameRegister(MF), MVT::i32);
823 SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
824 MVT::i32);
825 Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
826 Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
827
828 // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
829 // which leaves 2 caller saved registers, R2 & R3 for us to use.
830 unsigned StackReg = XCore::R2;
831 unsigned HandlerReg = XCore::R3;
832
833 SDValue OutChains[] = {
834 DAG.getCopyToReg(Chain, dl, StackReg, Stack),
835 DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
836 };
837
838 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
839
840 return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
841 DAG.getRegister(StackReg, MVT::i32),
842 DAG.getRegister(HandlerReg, MVT::i32));
843
844}
845
846SDValue XCoreTargetLowering::
847LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
848 return Op.getOperand(0);
849}
850
851SDValue XCoreTargetLowering::
852LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
853 SDValue Chain = Op.getOperand(0);
854 SDValue Trmp = Op.getOperand(1); // trampoline
855 SDValue FPtr = Op.getOperand(2); // nested function
856 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
857
858 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
859
860 // .align 4
861 // LDAPF_u10 r11, nest
862 // LDW_2rus r11, r11[0]
863 // STWSP_ru6 r11, sp[0]
864 // LDAPF_u10 r11, fptr
865 // LDW_2rus r11, r11[0]
866 // BAU_1r r11
867 // nest:
868 // .word nest
869 // fptr:
870 // .word fptr
871 SDValue OutChains[5];
872
873 SDValue Addr = Trmp;
874
875 SDLoc dl(Op);
876 OutChains[0] =
877 DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
878 MachinePointerInfo(TrmpAddr));
879
880 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
881 DAG.getConstant(4, dl, MVT::i32));
882 OutChains[1] =
883 DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
884 MachinePointerInfo(TrmpAddr, 4));
885
886 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
887 DAG.getConstant(8, dl, MVT::i32));
888 OutChains[2] =
889 DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
890 MachinePointerInfo(TrmpAddr, 8));
891
892 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
893 DAG.getConstant(12, dl, MVT::i32));
894 OutChains[3] =
895 DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
896
897 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
898 DAG.getConstant(16, dl, MVT::i32));
899 OutChains[4] =
900 DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
901
902 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
903}
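// Summary of the stores above (illustrative): the 20-byte trampoline is
// populated with five words:
//   [Trmp+0]  0x0a3cd805  instruction words for the sequence shown above
//   [Trmp+4]  0xd80456c0  instruction words
//   [Trmp+8]  0x27fb0a3c  instruction words
//   [Trmp+12] Nest        the 'nest' parameter value
//   [Trmp+16] FPtr        address of the nested function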
904
905SDValue XCoreTargetLowering::
906LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
907 SDLoc DL(Op);
908 unsigned IntNo = Op.getConstantOperandVal(0);
909 switch (IntNo) {
910 case Intrinsic::xcore_crc8:
911 EVT VT = Op.getValueType();
912 SDValue Data =
913 DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
914 Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
915 SDValue Crc(Data.getNode(), 1);
916 SDValue Results[] = { Crc, Data };
917 return DAG.getMergeValues(Results, DL);
918 }
919 return SDValue();
920}
921
922SDValue XCoreTargetLowering::
923LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
924 SDLoc DL(Op);
925 return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
926}
927
928//===----------------------------------------------------------------------===//
929// Calling Convention Implementation
930//===----------------------------------------------------------------------===//
931
932#include "XCoreGenCallingConv.inc"
933
934//===----------------------------------------------------------------------===//
935// Call Calling Convention Implementation
936//===----------------------------------------------------------------------===//
937
938/// XCore call implementation
939SDValue
940XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
941 SmallVectorImpl<SDValue> &InVals) const {
942 SelectionDAG &DAG = CLI.DAG;
943 SDLoc &dl = CLI.DL;
944 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
945 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
946 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
947 SDValue Chain = CLI.Chain;
948 SDValue Callee = CLI.Callee;
949 bool &isTailCall = CLI.IsTailCall;
950 CallingConv::ID CallConv = CLI.CallConv;
951 bool isVarArg = CLI.IsVarArg;
952
953 // XCore target does not yet support tail call optimization.
954 isTailCall = false;
955
956 // For now, only CallingConv::C implemented
957 switch (CallConv)
958 {
959 default:
960 report_fatal_error("Unsupported calling convention");
961 case CallingConv::Fast:
962 case CallingConv::C:
963 return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
964 Outs, OutVals, Ins, dl, DAG, InVals);
965 }
966}
967
968/// LowerCallResult - Lower the result values of a call into the
969/// appropriate copies out of appropriate physical registers / memory locations.
970static SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
971 const SmallVectorImpl<CCValAssign> &RVLocs,
972 const SDLoc &dl, SelectionDAG &DAG,
973 SmallVectorImpl<SDValue> &InVals) {
974 SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
975 // Copy results out of physical registers.
976 for (const CCValAssign &VA : RVLocs) {
977 if (VA.isRegLoc()) {
978 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
979 InGlue).getValue(1);
980 InGlue = Chain.getValue(2);
981 InVals.push_back(Chain.getValue(0));
982 } else {
983 assert(VA.isMemLoc());
984 ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
985 InVals.size()));
986 // Reserve space for this result.
987 InVals.push_back(SDValue());
988 }
989 }
990
991 // Copy results out of memory.
992 SmallVector<SDValue, 4> MemOpChains;
993 for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
994 int offset = ResultMemLocs[i].first;
995 unsigned index = ResultMemLocs[i].second;
996 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
997 SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
998 SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
999 InVals[index] = load;
1000 MemOpChains.push_back(load.getValue(1));
1001 }
1002
1003 // Transform all loads nodes into one single node because
1004 // all load nodes are independent of each other.
1005 if (!MemOpChains.empty())
1006 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1007
1008 return Chain;
1009}
1010
1011/// LowerCCCCallTo - function arguments are copied from virtual
1012/// regs to (physical regs)/(stack frame), CALLSEQ_START and
1013/// CALLSEQ_END are emitted.
1014/// TODO: isTailCall, sret.
1015SDValue XCoreTargetLowering::LowerCCCCallTo(
1016 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1017 bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1018 const SmallVectorImpl<SDValue> &OutVals,
1019 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1020 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1021
1022 // Analyze operands of the call, assigning locations to each operand.
1023 SmallVector<CCValAssign, 16> ArgLocs;
1024 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1025 *DAG.getContext());
1026
1027 // The ABI dictates there should be one stack slot available to the callee
1028 // on function entry (for saving lr).
1029 CCInfo.AllocateStack(4, Align(4));
1030
1031 CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1032
1033 SmallVector<CCValAssign, 16> RVLocs;
1034 // Analyze return values to determine the number of bytes of stack required.
1035 CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1036 *DAG.getContext());
1037 RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
1038 RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1039
1040 // Get a count of how many bytes are to be pushed on the stack.
1041 unsigned NumBytes = RetCCInfo.getStackSize();
1042
1043 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1044
1045 SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
1046 SmallVector<SDValue, 12> MemOpChains;
1047
1048 // Walk the register/memloc assignments, inserting copies/loads.
1049 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1050 CCValAssign &VA = ArgLocs[i];
1051 SDValue Arg = OutVals[i];
1052
1053 // Promote the value if needed.
1054 switch (VA.getLocInfo()) {
1055 default: llvm_unreachable("Unknown loc info!");
1056 case CCValAssign::Full: break;
1057 case CCValAssign::SExt:
1058 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1059 break;
1060 case CCValAssign::ZExt:
1061 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1062 break;
1063 case CCValAssign::AExt:
1064 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1065 break;
1066 }
1067
1068 // Arguments that can be passed on register must be kept at
1069 // RegsToPass vector
1070 if (VA.isRegLoc()) {
1071 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1072 } else {
1073 assert(VA.isMemLoc());
1074
1075 int Offset = VA.getLocMemOffset();
1076
1077 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1078 Chain, Arg,
1079 DAG.getConstant(Offset/4, dl,
1080 MVT::i32)));
1081 }
1082 }
1083
1084 // Transform all store nodes into one single node because
1085 // all store nodes are independent of each other.
1086 if (!MemOpChains.empty())
1087 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1088
1089 // Build a sequence of copy-to-reg nodes chained together with token
1090 // chain and flag operands which copy the outgoing args into registers.
1091 // The InGlue is necessary since all emitted instructions must be
1092 // stuck together.
1093 SDValue InGlue;
1094 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1095 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1096 RegsToPass[i].second, InGlue);
1097 InGlue = Chain.getValue(1);
1098 }
1099
1100 // If the callee is a GlobalAddress node (quite common, every direct call is)
1101 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1102 // Likewise ExternalSymbol -> TargetExternalSymbol.
1103 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1104 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1105 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1106 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1107
1108 // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1109 // = Chain, Callee, Reg#1, Reg#2, ...
1110 //
1111 // Returns a chain & a flag for retval copy to use.
1112 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1113 SmallVector<SDValue, 4> Ops;
1114 Ops.push_back(Chain);
1115 Ops.push_back(Callee);
1116
1117 // Add argument registers to the end of the list so that they are
1118 // known live into the call.
1119 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1120 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1121 RegsToPass[i].second.getValueType()));
1122
1123 if (InGlue.getNode())
1124 Ops.push_back(InGlue);
1125
1126 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1127 InGlue = Chain.getValue(1);
1128
1129 // Create the CALLSEQ_END node.
1130 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
1131 InGlue = Chain.getValue(1);
1132
1133 // Handle result values, copying them out of physregs into vregs that we
1134 // return.
1135 return LowerCallResult(Chain, InGlue, RVLocs, dl, DAG, InVals);
1136}
1137
1138//===----------------------------------------------------------------------===//
1139// Formal Arguments Calling Convention Implementation
1140//===----------------------------------------------------------------------===//
1141
1142namespace {
1143 struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1144}
1145
1146/// XCore formal arguments implementation
1147SDValue XCoreTargetLowering::LowerFormalArguments(
1148 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1149 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1150 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1151 switch (CallConv)
1152 {
1153 default:
1154 report_fatal_error("Unsupported calling convention");
1155 case CallingConv::C:
1156 case CallingConv::Fast:
1157 return LowerCCCArguments(Chain, CallConv, isVarArg,
1158 Ins, dl, DAG, InVals);
1159 }
1160}
1161
1162/// LowerCCCArguments - transform physical registers into
1163/// virtual registers and generate load operations for
1164/// arguments placed on the stack.
1165/// TODO: sret
1166SDValue XCoreTargetLowering::LowerCCCArguments(
1167 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1168 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1169 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1170 MachineFunction &MF = DAG.getMachineFunction();
1171 MachineFrameInfo &MFI = MF.getFrameInfo();
1172 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1173 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1174
1175 // Assign locations to all of the incoming arguments.
1176 SmallVector<CCValAssign, 16> ArgLocs;
1177 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1178 *DAG.getContext());
1179
1180 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1181
1182 unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1183
1184 unsigned LRSaveSize = StackSlotSize;
1185
1186 if (!isVarArg)
1187 XFI->setReturnStackOffset(CCInfo.getStackSize() + LRSaveSize);
1188
1189 // All getCopyFromReg ops must precede any getMemcpys to prevent the
1190 // scheduler clobbering a register before it has been copied.
1191 // The stages are:
1192 // 1. CopyFromReg (and load) arg & vararg registers.
1193 // 2. Chain CopyFromReg nodes into a TokenFactor.
1194 // 3. Memcpy 'byVal' args & push final InVals.
1195 // 4. Chain mem ops nodes into a TokenFactor.
1196 SmallVector<SDValue, 4> CFRegNode;
1197 SmallVector<ArgDataPair, 4> ArgData;
1198 SmallVector<SDValue, 4> MemOps;
1199
1200 // 1a. CopyFromReg (and load) arg registers.
1201 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1202
1203 CCValAssign &VA = ArgLocs[i];
1204 SDValue ArgIn;
1205
1206 if (VA.isRegLoc()) {
1207 // Arguments passed in registers
1208 EVT RegVT = VA.getLocVT();
1209 switch (RegVT.getSimpleVT().SimpleTy) {
1210 default:
1211 {
1212#ifndef NDEBUG
1213 errs() << "LowerFormalArguments Unhandled argument type: "
1214 << RegVT << "\n";
1215#endif
1216 llvm_unreachable(nullptr);
1217 }
1218 case MVT::i32:
1219 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1220 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1221 ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1222 CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1223 }
1224 } else {
1225 // Only arguments passed on the stack should make it here.
1226 assert(VA.isMemLoc());
1227 // Load the argument to a virtual register
1228 unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1229 if (ObjSize > StackSlotSize) {
1230 errs() << "LowerFormalArguments Unhandled argument type: "
1231 << VA.getLocVT() << "\n";
1232 }
1233 // Create the frame index object for this incoming parameter...
1234 int FI = MFI.CreateFixedObject(ObjSize,
1235 LRSaveSize + VA.getLocMemOffset(),
1236 true);
1237
1238 // Create the SelectionDAG nodes corresponding to a load
1239 //from this parameter
1240 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1241 ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1242 MachinePointerInfo::getFixedStack(MF, FI));
1243 }
1244 const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1245 ArgData.push_back(ADP);
1246 }
1247
1248 // 1b. CopyFromReg vararg registers.
1249 if (isVarArg) {
1250 // Argument registers
1251 static const MCPhysReg ArgRegs[] = {
1252 XCore::R0, XCore::R1, XCore::R2, XCore::R3
1253 };
1255 unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1256 if (FirstVAReg < std::size(ArgRegs)) {
1257 int offset = 0;
1258 // Save remaining registers, storing higher register numbers at a higher
1259 // address
1260 for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1261 // Create a stack slot
1262 int FI = MFI.CreateFixedObject(4, offset, true);
1263 if (i == (int)FirstVAReg) {
1264 XFI->setVarArgsFrameIndex(FI);
1265 }
1266 offset -= StackSlotSize;
1267 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1268 // Move argument from phys reg -> virt reg
1269 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1270 RegInfo.addLiveIn(ArgRegs[i], VReg);
1271 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1272 CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1273 // Move argument from virt reg -> stack
1274 SDValue Store =
1275 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1276 MemOps.push_back(Store);
1277 }
1278 } else {
1279 // This will point to the next argument passed via stack.
1280 XFI->setVarArgsFrameIndex(
1281 MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));
1282 }
1283 }
1284
1285 // 2. chain CopyFromReg nodes into a TokenFactor.
1286 if (!CFRegNode.empty())
1287 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1288
1289 // 3. Memcpy 'byVal' args & push final InVals.
1290 // Aggregates passed "byVal" need to be copied by the callee.
1291 // The callee will use a pointer to this copy, rather than the original
1292 // pointer.
1293 for (const ArgDataPair &ArgDI : ArgData) {
1294 if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
1295 unsigned Size = ArgDI.Flags.getByValSize();
1296 Align Alignment =
1297 std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
1298 // Create a new object on the stack and copy the pointee into it.
1299 int FI = MFI.CreateStackObject(Size, Alignment, false);
1300 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1301 InVals.push_back(FIN);
1302 MemOps.push_back(DAG.getMemcpy(
1303 Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
1304 Alignment, false, false, /*CI=*/nullptr, std::nullopt,
1305 MachinePointerInfo(), MachinePointerInfo()));
1306 } else {
1307 InVals.push_back(ArgDI.SDV);
1308 }
1309 }
1310
1311 // 4. Chain mem ops nodes into a TokenFactor.
1312 if (!MemOps.empty()) {
1313 MemOps.push_back(Chain);
1314 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1315 }
1316
1317 return Chain;
1318}
1319
1320//===----------------------------------------------------------------------===//
1321// Return Value Calling Convention Implementation
1322//===----------------------------------------------------------------------===//
1323
1324bool XCoreTargetLowering::
1325CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1326 bool isVarArg,
1327 const SmallVectorImpl<ISD::OutputArg> &Outs,
1328 LLVMContext &Context) const {
1329 SmallVector<CCValAssign, 16> RVLocs;
1330 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1331 if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1332 return false;
1333 if (CCInfo.getStackSize() != 0 && isVarArg)
1334 return false;
1335 return true;
1336}
1337
1338SDValue
1339XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1340 bool isVarArg,
1341 const SmallVectorImpl<ISD::OutputArg> &Outs,
1342 const SmallVectorImpl<SDValue> &OutVals,
1343 const SDLoc &dl, SelectionDAG &DAG) const {
1344
1345 XCoreFunctionInfo *XFI =
1346 DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1347 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1348
1349 // CCValAssign - represent the assignment of
1350 // the return value to a location
1351 SmallVector<CCValAssign, 16> RVLocs;
1352
1353 // CCState - Info about the registers and stack slot.
1354 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1355 *DAG.getContext());
1356
1357 // Analyze return values.
1358 if (!isVarArg)
1359 CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));
1360
1361 CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1362
1363 SDValue Glue;
1364 SmallVector<SDValue, 4> RetOps(1, Chain);
1365
1366 // Return on XCore is always a "retsp 0"
1367 RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1368
1369 SmallVector<SDValue, 4> MemOpChains;
1370 // Handle return values that must be copied to memory.
1371 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1372 CCValAssign &VA = RVLocs[i];
1373 if (VA.isRegLoc())
1374 continue;
1375 assert(VA.isMemLoc());
1376 if (isVarArg) {
1377 report_fatal_error("Can't return value from vararg function in memory");
1378 }
1379
1380 int Offset = VA.getLocMemOffset();
1381 unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1382 // Create the frame index object for the memory location.
1383 int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1384
1385 // Create a SelectionDAG node corresponding to a store
1386 // to this memory location.
1387 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1388 MemOpChains.push_back(DAG.getStore(
1389 Chain, dl, OutVals[i], FIN,
1390 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
1391 }
1392
1393 // Transform all store nodes into one single node because
1394 // all stores are independent of each other.
1395 if (!MemOpChains.empty())
1396 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1397
1398 // Now handle return values copied to registers.
1399 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1400 CCValAssign &VA = RVLocs[i];
1401 if (!VA.isRegLoc())
1402 continue;
1403 // Copy the result values into the output registers.
1404 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1405
1406 // guarantee that all emitted copies are
1407 // stuck together, avoiding something bad
1408 Glue = Chain.getValue(1);
1409 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1410 }
1411
1412 RetOps[0] = Chain; // Update chain.
1413
1414 // Add the glue if we have it.
1415 if (Glue.getNode())
1416 RetOps.push_back(Glue);
1417
1418 return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1419}
1420
1421//===----------------------------------------------------------------------===//
1422// Other Lowering Code
1423//===----------------------------------------------------------------------===//
1424
1425MachineBasicBlock *
1426XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1427 MachineBasicBlock *BB) const {
1428 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1429 DebugLoc dl = MI.getDebugLoc();
1430 assert((MI.getOpcode() == XCore::SELECT_CC) &&
1431 "Unexpected instr type to insert");
1432
1433 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1434 // control-flow pattern. The incoming instruction knows the destination vreg
1435 // to set, the condition code register to branch on, the true/false values to
1436 // select between, and a branch opcode to use.
1437 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1438 MachineFunction::iterator It = ++BB->getIterator();
1439
1440 // thisMBB:
1441 // ...
1442 // TrueVal = ...
1443 // cmpTY ccX, r1, r2
1444 // bCC copy1MBB
1445 // fallthrough --> copy0MBB
1446 MachineBasicBlock *thisMBB = BB;
1447 MachineFunction *F = BB->getParent();
1448 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1449 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1450 F->insert(It, copy0MBB);
1451 F->insert(It, sinkMBB);
1452
1453 // Transfer the remainder of BB and its successor edges to sinkMBB.
1454 sinkMBB->splice(sinkMBB->begin(), BB,
1455 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1456 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1457
1458 // Next, add the true and fallthrough blocks as its successors.
1459 BB->addSuccessor(copy0MBB);
1460 BB->addSuccessor(sinkMBB);
1461
1462 BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1463 .addReg(MI.getOperand(1).getReg())
1464 .addMBB(sinkMBB);
1465
1466 // copy0MBB:
1467 // %FalseValue = ...
1468 // # fallthrough to sinkMBB
1469 BB = copy0MBB;
1470
1471 // Update machine-CFG edges
1472 BB->addSuccessor(sinkMBB);
1473
1474 // sinkMBB:
1475 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1476 // ...
1477 BB = sinkMBB;
1478 BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1479 .addReg(MI.getOperand(3).getReg())
1480 .addMBB(copy0MBB)
1481 .addReg(MI.getOperand(2).getReg())
1482 .addMBB(thisMBB);
1483
1484 MI.eraseFromParent(); // The pseudo instruction is gone now.
1485 return BB;
1486}
1487
1488//===----------------------------------------------------------------------===//
1489// Target Optimization Hooks
1490//===----------------------------------------------------------------------===//
1491
1492SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1493 DAGCombinerInfo &DCI) const {
1494 SelectionDAG &DAG = DCI.DAG;
1495 SDLoc dl(N);
1496 switch (N->getOpcode()) {
1497 default: break;
1498 case ISD::INTRINSIC_VOID:
1499 switch (N->getConstantOperandVal(1)) {
1500 case Intrinsic::xcore_outt:
1501 case Intrinsic::xcore_outct:
1502 case Intrinsic::xcore_chkct: {
1503 SDValue OutVal = N->getOperand(3);
1504 // These instructions ignore the high bits.
1505 if (OutVal.hasOneUse()) {
1506 unsigned BitWidth = OutVal.getValueSizeInBits();
1507 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1508 KnownBits Known;
1509 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1510 !DCI.isBeforeLegalizeOps());
1511 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1512 if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1513 TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1514 DCI.CommitTargetLoweringOpt(TLO);
1515 }
1516 break;
1517 }
1518 case Intrinsic::xcore_setpt: {
1519 SDValue Time = N->getOperand(3);
1520 // This instruction ignores the high bits.
1521 if (Time.hasOneUse()) {
1522 unsigned BitWidth = Time.getValueSizeInBits();
1523 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1524 KnownBits Known;
1525 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1526 !DCI.isBeforeLegalizeOps());
1527 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1528 if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1529 TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1530 DCI.CommitTargetLoweringOpt(TLO);
1531 }
1532 break;
1533 }
1534 }
1535 break;
1536 case XCoreISD::LADD: {
1537 SDValue N0 = N->getOperand(0);
1538 SDValue N1 = N->getOperand(1);
1539 SDValue N2 = N->getOperand(2);
1540 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1541 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1542 EVT VT = N0.getValueType();
1543
1544 // canonicalize constant to RHS
1545 if (N0C && !N1C)
1546 return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1547
1548 // fold (ladd 0, 0, x) -> 0, x & 1
1549 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1550 SDValue Carry = DAG.getConstant(0, dl, VT);
1551 SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1552 DAG.getConstant(1, dl, VT));
1553 SDValue Ops[] = { Result, Carry };
1554 return DAG.getMergeValues(Ops, dl);
1555 }
1556
1557 // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1558 // low bit set
1559 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1560 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1561 VT.getSizeInBits() - 1);
1562 KnownBits Known = DAG.computeKnownBits(N2);
1563 if ((Known.Zero & Mask) == Mask) {
1564 SDValue Carry = DAG.getConstant(0, dl, VT);
1565 SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1566 SDValue Ops[] = { Result, Carry };
1567 return DAG.getMergeValues(Ops, dl);
1568 }
1569 }
1570 }
1571 break;
1572 case XCoreISD::LSUB: {
1573 SDValue N0 = N->getOperand(0);
1574 SDValue N1 = N->getOperand(1);
1575 SDValue N2 = N->getOperand(2);
1576 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1577 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1578 EVT VT = N0.getValueType();
1579
1580 // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1581 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1582 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1583 VT.getSizeInBits() - 1);
1584 KnownBits Known = DAG.computeKnownBits(N2);
1585 if ((Known.Zero & Mask) == Mask) {
1586 SDValue Borrow = N2;
1587 SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1588 DAG.getConstant(0, dl, VT), N2);
1589 SDValue Ops[] = { Result, Borrow };
1590 return DAG.getMergeValues(Ops, dl);
1591 }
1592 }
1593
1594 // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1595 // low bit set
1596 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1597 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1598 VT.getSizeInBits() - 1);
1599 KnownBits Known = DAG.computeKnownBits(N2);
1600 if ((Known.Zero & Mask) == Mask) {
1601 SDValue Borrow = DAG.getConstant(0, dl, VT);
1602 SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1603 SDValue Ops[] = { Result, Borrow };
1604 return DAG.getMergeValues(Ops, dl);
1605 }
1606 }
1607 }
1608 break;
1609 case XCoreISD::LMUL: {
1610 SDValue N0 = N->getOperand(0);
1611 SDValue N1 = N->getOperand(1);
1612 SDValue N2 = N->getOperand(2);
1613 SDValue N3 = N->getOperand(3);
1614 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1615 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1616 EVT VT = N0.getValueType();
1617 // Canonicalize multiplicative constant to RHS. If both multiplicative
1618 // operands are constant canonicalize smallest to RHS.
1619 if ((N0C && !N1C) ||
1620 (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1621 return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1622 N1, N0, N2, N3);
1623
1624 // lmul(x, 0, a, b)
1625 if (N1C && N1C->isZero()) {
1626 // If the high result is unused fold to add(a, b)
1627 if (N->hasNUsesOfValue(0, 0)) {
1628 SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1629 SDValue Ops[] = { Lo, Lo };
1630 return DAG.getMergeValues(Ops, dl);
1631 }
1632 // Otherwise fold to ladd(a, b, 0)
1633 SDValue Result =
1634 DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1635 SDValue Carry(Result.getNode(), 1);
1636 SDValue Ops[] = { Carry, Result };
1637 return DAG.getMergeValues(Ops, dl);
1638 }
1639 }
1640 break;
1641 case ISD::ADD: {
1642 // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1643 // lmul(x, y, a, b). The high result of lmul will be ignored.
1644 // This is only profitable if the intermediate results are unused
1645 // elsewhere.
1646 SDValue Mul0, Mul1, Addend0, Addend1;
1647 if (N->getValueType(0) == MVT::i32 &&
1648 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1649 SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1650 DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1651 Mul1, Addend0, Addend1);
1652 SDValue Result(Ignored.getNode(), 1);
1653 return Result;
1654 }
1655 APInt HighMask = APInt::getHighBitsSet(64, 32);
1656 // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1657 // lmul(x, y, a, b) if all operands are zero-extended. We do this
1658 // before type legalization as it is messy to match the operands after
1659 // that.
1660 if (N->getValueType(0) == MVT::i64 &&
1661 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1662 DAG.MaskedValueIsZero(Mul0, HighMask) &&
1663 DAG.MaskedValueIsZero(Mul1, HighMask) &&
1664 DAG.MaskedValueIsZero(Addend0, HighMask) &&
1665 DAG.MaskedValueIsZero(Addend1, HighMask)) {
1666 SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1667 Mul0, DAG.getConstant(0, dl, MVT::i32));
1668 SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1669 Mul1, DAG.getConstant(0, dl, MVT::i32));
1670 SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1671 Addend0, DAG.getConstant(0, dl, MVT::i32));
1672 SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1673 Addend1, DAG.getConstant(0, dl, MVT::i32));
1674 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1675 DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1676 Addend0L, Addend1L);
1677 SDValue Lo(Hi.getNode(), 1);
1678 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1679 }
1680 }
1681 break;
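// Illustrative sketch, not part of the upstream source (the value names below
// are made up): the 32-bit fold above targets chains such as
//
//   %m = mul i32 %x, %y
//   %s = add i32 %m, %a
//   %r = add i32 %s, %b
//
// and replaces %r with the low word of lmul(%x, %y, %a, %b); the high word is
// simply left without uses. The 64-bit fold fires only when MaskedValueIsZero
// proves all four operands fit in 32 bits, in which case the single
// 32x32+32+32 multiply-accumulate is the exact 64-bit result and BUILD_PAIR
// reassembles it from the (high, low) pair.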
1682 case ISD::STORE: {
1683 // Replace unaligned store of unaligned load with memmove.
1684 StoreSDNode *ST = cast<StoreSDNode>(N);
1685 if (!DCI.isBeforeLegalize() ||
1686 allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1687 ST->getMemoryVT(),
1688 *ST->getMemOperand()) ||
1689 ST->isVolatile() || ST->isIndexed()) {
1690 break;
1691 }
1692 SDValue Chain = ST->getChain();
1693
1694 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1695 assert((StoreBits % 8) == 0 &&
1696 "Store size in bits must be a multiple of 8");
1697 Align Alignment = ST->getAlign();
1698
1699 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1700 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1701 LD->getAlign() == Alignment &&
1702 !LD->isVolatile() && !LD->isIndexed() &&
1703 Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1704 bool isTail = isInTailCallPosition(DAG, ST, Chain);
1705 return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
1706 DAG.getConstant(StoreBits / 8, dl, MVT::i32),
1707 Alignment, false, nullptr, isTail,
1708 ST->getPointerInfo(), LD->getPointerInfo());
1709 }
1710 }
1711 break;
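// Illustrative note, not part of the upstream source (the value names below are
// made up): the combine above turns a pattern like
//
//   %v = load i32, ptr %src, align 1
//   store i32 %v, ptr %dst, align 1
//
// into a 4-byte memmove(%dst, %src) once the loaded value has no other users.
// memmove rather than memcpy is the conservative choice, since nothing here
// proves the two locations are disjoint, and the reachesChainWithoutSideEffects
// check is there to ensure no intervening memory operation sits between the
// load and the store on the chain.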
1712 }
1713 }
1714 return SDValue();
1715}
1716
1717void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1718 KnownBits &Known,
1719 const APInt &DemandedElts,
1720 const SelectionDAG &DAG,
1721 unsigned Depth) const {
1722 Known.resetAll();
1723 switch (Op.getOpcode()) {
1724 default: break;
1725 case XCoreISD::LADD:
1726 case XCoreISD::LSUB:
1727 if (Op.getResNo() == 1) {
1728 // Top bits of carry / borrow are clear.
1729 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1730 Known.getBitWidth() - 1);
1731 }
1732 break;
1733 case ISD::INTRINSIC_W_CHAIN:
1734 {
1735 unsigned IntNo = Op.getConstantOperandVal(1);
1736 switch (IntNo) {
1737 case Intrinsic::xcore_getts:
1738 // High bits are known to be zero.
1739 Known.Zero =
1740 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 16);
1741 break;
1742 case Intrinsic::xcore_int:
1743 case Intrinsic::xcore_inct:
1744 // High bits are known to be zero.
1745 Known.Zero =
1746 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 8);
1747 break;
1748 case Intrinsic::xcore_testct:
1749 // Result is either 0 or 1.
1750 Known.Zero =
1751 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 1);
1752 break;
1753 case Intrinsic::xcore_testwct:
1754 // Result is in the range 0 - 4.
1755 Known.Zero =
1756 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 3);
1757 break;
1758 }
1759 }
1760 break;
1761 }
1762}
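// Illustrative note, not part of the upstream source: advertising that the
// carry/borrow result of LADD/LSUB and the listed intrinsic results occupy only
// their low bits lets generic combines drop redundant masking. For instance an
// (and carry, 1) or a zero-extension applied to LADD's second result is
// recognised as a no-op, because this hook reports bits 1..31 as known zero.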
1763
1764//===----------------------------------------------------------------------===//
1765// Addressing mode description hooks
1766//===----------------------------------------------------------------------===//
1767
1768static inline bool isImmUs(int64_t val)
1769{
1770 return (val >= 0 && val <= 11);
1771}
1772
1773static inline bool isImmUs2(int64_t val)
1774{
1775 return (val%2 == 0 && isImmUs(val/2));
1776}
1777
1778static inline bool isImmUs4(int64_t val)
1779{
1780 return (val%4 == 0 && isImmUs(val/4));
1781}
1782
1783/// isLegalAddressingMode - Return true if the addressing mode represented
1784 /// by AM is legal for this target, for a load/store of the specified type.
1785 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1786 const AddrMode &AM, Type *Ty,
1787 unsigned AS,
1788 Instruction *I) const {
1789 if (Ty->getTypeID() == Type::VoidTyID)
1790 return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1791
1792 unsigned Size = DL.getTypeAllocSize(Ty);
1793 if (AM.BaseGV) {
1794 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1795 AM.BaseOffs%4 == 0;
1796 }
1797
1798 switch (Size) {
1799 case 1:
1800 // reg + imm
1801 if (AM.Scale == 0) {
1802 return isImmUs(AM.BaseOffs);
1803 }
1804 // reg + reg
1805 return AM.Scale == 1 && AM.BaseOffs == 0;
1806 case 2:
1807 case 3:
1808 // reg + imm
1809 if (AM.Scale == 0) {
1810 return isImmUs2(AM.BaseOffs);
1811 }
1812 // reg + reg<<1
1813 return AM.Scale == 2 && AM.BaseOffs == 0;
1814 default:
1815 // reg + imm
1816 if (AM.Scale == 0) {
1817 return isImmUs4(AM.BaseOffs);
1818 }
1819 // reg + reg<<2
1820 return AM.Scale == 4 && AM.BaseOffs == 0;
1821 }
1822}
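// Illustrative examples, not part of the upstream source, of what the hook
// above accepts for a 4-byte (word) access:
//
//   base + 0, base + 4, ..., base + 44   // reg + immediate 4*k, k in [0, 11]
//   base + 4*index                       // reg + reg<<2 (Scale == 4, zero offset)
//   globalBase + 4*k                     // GV-relative, any word-aligned offset
//
// whereas e.g. base + 2 or base + 4*index + 8 are rejected and must be formed
// with explicit address arithmetic. Byte and halfword accesses follow the same
// shape with scales of 1 and 2 and immediate ranges 0-11 and 0-22 respectively.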
1823
1824//===----------------------------------------------------------------------===//
1825// XCore Inline Assembly Support
1826//===----------------------------------------------------------------------===//
1827
1828std::pair<unsigned, const TargetRegisterClass *>
1829XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1830 StringRef Constraint,
1831 MVT VT) const {
1832 if (Constraint.size() == 1) {
1833 switch (Constraint[0]) {
1834 default : break;
1835 case 'r':
1836 return std::make_pair(0U, &XCore::GRRegsRegClass);
1837 }
1838 }
1839 // Use the default implementation in TargetLowering to convert the register
1840 // constraint into a member of a register class.
1841 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1842}
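// Illustrative usage, an assumed example not taken from the upstream source:
// the single-letter 'r' constraint mapped to GRRegs above is what C-level
// inline assembly such as
//
//   int in = 5, out;
//   asm("add %0, %1, %1" : "=r"(out) : "r"(in));
//
// relies on; both operands must be allocated to the general-purpose register
// class, the only class this hook maps a constraint to.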