// XCoreISelLowering.cpp — extracted from the LLVM 18.0.0git doxygen pages.
// (Original page header: "Go to the documentation of this file.")
1//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the XCoreTargetLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "XCoreISelLowering.h"
14#include "XCore.h"
16#include "XCoreSubtarget.h"
17#include "XCoreTargetMachine.h"
26#include "llvm/IR/CallingConv.h"
27#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalAlias.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/IntrinsicsXCore.h"
34#include "llvm/Support/Debug.h"
38#include <algorithm>
39
40using namespace llvm;
41
42#define DEBUG_TYPE "xcore-lower"
43
// getTargetNodeName - Return a human-readable name for an XCore-specific
// SelectionDAG node opcode; used when dumping DAGs for debugging.  Returns
// nullptr for opcodes it does not know about.
// NOTE(review): the return-type/qualifier line and the `switch (Opcode)`
// line are missing from this extract of the file.
getTargetNodeName(unsigned Opcode) const
{
  {
  case XCoreISD::FIRST_NUMBER : break; // sentinel, not a real node
  case XCoreISD::BL : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
  case XCoreISD::STWSP : return "XCoreISD::STWSP";
  case XCoreISD::RETSP : return "XCoreISD::RETSP";
  case XCoreISD::LADD : return "XCoreISD::LADD";
  case XCoreISD::LSUB : return "XCoreISD::LSUB";
  case XCoreISD::LMUL : return "XCoreISD::LMUL";
  case XCoreISD::MACCU : return "XCoreISD::MACCU";
  case XCoreISD::MACCS : return "XCoreISD::MACCS";
  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
  }
  // Unknown / target-independent opcode.
  return nullptr;
}
70
// XCoreTargetLowering constructor - configures how LLVM IR constructs are
// legalized and lowered for XCore: register classes, boolean contents, and
// per-operation legalization actions.
// NOTE(review): many setOperationAction / configuration statements are
// missing from this extract (gaps in the original line numbering); only the
// section comments and a few surviving statements are visible below.
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.

  // 64bit

  // Bit Manipulation

  // TRAP is supported natively, no expansion needed.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.

  // Conversion of i64 -> double produces constantpool nodes

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.

  // Varargs

  // Dynamic stack

  // Exception handling

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.

  // TRAMPOLINE is custom lowered.

  // We want to custom lower some of our intrinsics.

  // We have target-specific dag combine patterns for the following nodes:

}
175
// isZExtFree - Return true if zero-extending the given loaded value is free.
// Only i8 load results qualify — presumably because XCore byte loads already
// zero-extend; confirm against the ISA.
// NOTE(review): the function signature line (declaring Val and VT2) is
// missing from this extract.
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  // Both the loaded type and the destination type must be simple integers.
  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}
193
// LowerOperation - Central dispatcher for all operations this target marks
// Custom; routes each opcode to its dedicated Lower* helper.
// NOTE(review): the function signature lines are missing from this extract.
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
  default:
    // Anything marked Custom must be handled above.
    llvm_unreachable("unimplemented operand");
  }
}
225
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
// NOTE(review): the function signature lines (declaring N and Results) are
// missing from this extract.
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    // i64 add/sub are expanded via ExpandADDSUB (carry-chained node pairs).
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}
240
241//===----------------------------------------------------------------------===//
242// Misc Lower Operation implementation
243//===----------------------------------------------------------------------===//
244
245SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
246 const GlobalValue *GV,
247 SelectionDAG &DAG) const {
248 // FIXME there is no actual debug info here
249 SDLoc dl(GA);
250
251 if (GV->getValueType()->isFunctionTy())
252 return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
253
254 const auto *GVar = dyn_cast<GlobalVariable>(GV);
255 if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
256 (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
257 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
258
259 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
260}
261
// IsSmallObject - Return true if the global is small enough to be addressed
// directly; callers (LowerGlobalAddress) route large objects through the
// constant pool instead.
static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
  // NOTE(review): the condition guarding this early return is missing from
  // this extract (original line 263) — presumably a code-model check on XTL.
    return true;

  // Unsized objects are conservatively treated as large.
  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  // Size 0 means "unknown", so it also counts as large.
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}
274
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  // Small objects are addressed directly through the appropriate wrapper
  // node; large objects have their address loaded from the constant pool.
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
    Type *Ty = Type::getInt32Ty(*DAG.getContext());
    // NOTE(review): the lines constructing Idx and GAI (a constant GEP over
    // the global) are missing from this extract (original lines 296-297).
        Type::getInt8Ty(*DAG.getContext()), const_cast<GlobalValue *>(GV), Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}
304
305SDValue XCoreTargetLowering::
306LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
307{
308 SDLoc DL(Op);
309 auto PtrVT = getPointerTy(DAG.getDataLayout());
310 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
311 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
312
313 return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
314}
315
316SDValue XCoreTargetLowering::
317LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
318{
319 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
320 // FIXME there isn't really debug info here
321 SDLoc dl(CP);
322 EVT PtrVT = Op.getValueType();
323 SDValue Res;
324 if (CP->isMachineConstantPoolEntry()) {
325 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
326 CP->getAlign(), CP->getOffset());
327 } else {
328 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
329 CP->getOffset());
330 }
331 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
332}
333
336}
337
SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  // Lower a jump-table branch to BR_JT for small tables (<= 32 entries) or
  // BR_JT32 with a scaled index for larger ones.
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  // NOTE(review): the line obtaining MF (the MachineFunction) is missing
  // from this extract (original line 347).
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  // The shift below assumes the entry count fits in 31 bits.
  assert((NumEntries >> 31) == 0);
  // Index is doubled — presumably because BR_JT32 entries are twice as wide;
  // confirm against the instruction definition.
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}
361
362SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
363 const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
364 SelectionDAG &DAG) const {
365 auto PtrVT = getPointerTy(DAG.getDataLayout());
366 if ((Offset & 0x3) == 0) {
367 return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
368 }
369 // Lower to pair of consecutive word aligned loads plus some bit shifting.
370 int32_t HighOffset = alignTo(Offset, 4);
371 int32_t LowOffset = HighOffset - 4;
372 SDValue LowAddr, HighAddr;
373 if (GlobalAddressSDNode *GASD =
374 dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
375 LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
376 LowOffset);
377 HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
378 HighOffset);
379 } else {
380 LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
381 DAG.getConstant(LowOffset, DL, MVT::i32));
382 HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
383 DAG.getConstant(HighOffset, DL, MVT::i32));
384 }
385 SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
386 SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
387
388 SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
389 SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
390 SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
391 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
392 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
393 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
394 High.getValue(1));
395 SDValue Ops[] = { Result, Chain };
396 return DAG.getMergeValues(Ops, DL);
397}
398
// Returns true when known-bits analysis proves the low two bits of Value are
// zero, i.e. the value is a multiple of 4 (word aligned).
// NOTE(review): the function signature line is missing from this extract.
{
  KnownBits Known = DAG.computeKnownBits(Value);
  return Known.countMinTrailingZeros() >= 2;
}
404
SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  // Custom lowering for i32 loads the target cannot perform directly: use
  // word loads plus shifts when word alignment can be proven, a pair of
  // halfword loads for align-2, and a __misaligned_load libcall otherwise.
  // NOTE(review): several lines are missing from this extract (original
  // lines 407, 413, 461-462, 468, 472), including the declarations of
  // Context, Args, Entry and CLI used below.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

  // If the access is acceptable as-is, keep the generic load (the condition
  // head is one of the missing lines).
      LD->getMemoryVT(), *LD->getMemOperand()))
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    // Case: base + constant offset where the base is provably word aligned.
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    // Case: global + offset where the global itself is word aligned.
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  // Align-2: assemble the word from two halfword loads; the second is
  // shifted into the high 16 bits.
  if (LD->getAlign() == Align(2)) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                                 LD->getPointerInfo(), MVT::i16, Align(2),
                                 LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       Align(2), LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}
479
SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  // Custom lowering for i32 stores the target cannot perform directly: two
  // truncating halfword stores for align-2, otherwise a __misaligned_store
  // libcall.
  // NOTE(review): several lines are missing from this extract (original
  // lines 481, 486, 512-513, 522, 524, 526), including the declarations of
  // Context, Args, Entry and CLI used below.
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

  // If the access is acceptable as-is, keep the generic store (the condition
  // head is one of the missing lines).
      ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  // Align-2: split the word into low/high halves and store each as i16.
  if (ST->getAlign() == Align(2)) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow =
        DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
                          MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      DAG.getExternalSymbol("__misaligned_store",
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  // Only the chain matters — the store produces no value.
  return CallResult.second;
}
532
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  // Lower a signed 32x32->64 multiply to a two-result target node with zero
  // addends.
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  // NOTE(review): the line creating Hi is missing from this extract
  // (original line 542) — presumably a XCoreISD::MACCS node; confirm.
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  // Result 1 of the node is the low half of the product.
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
549
SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  // Lower an unsigned 32x32->64 multiply to a two-result target node with
  // zero addends.
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  // NOTE(review): the line creating Hi is missing from this extract
  // (original line 559) — presumably a XCoreISD::LMUL node; confirm.
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  // Result 1 of the node is the low half of the product.
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
566
567/// isADDADDMUL - Return whether Op is in a form that is equivalent to
568/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
569/// each intermediate result in the calculation must also have a single use.
570/// If the Op is in the correct form the constituent parts are written to Mul0,
571/// Mul1, Addend0 and Addend1.
572static bool
573isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
574 SDValue &Addend1, bool requireIntermediatesHaveOneUse)
575{
576 if (Op.getOpcode() != ISD::ADD)
577 return false;
578 SDValue N0 = Op.getOperand(0);
579 SDValue N1 = Op.getOperand(1);
580 SDValue AddOp;
581 SDValue OtherOp;
582 if (N0.getOpcode() == ISD::ADD) {
583 AddOp = N0;
584 OtherOp = N1;
585 } else if (N1.getOpcode() == ISD::ADD) {
586 AddOp = N1;
587 OtherOp = N0;
588 } else {
589 return false;
590 }
591 if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
592 return false;
593 if (OtherOp.getOpcode() == ISD::MUL) {
594 // add(add(a,b),mul(x,y))
595 if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
596 return false;
597 Mul0 = OtherOp.getOperand(0);
598 Mul1 = OtherOp.getOperand(1);
599 Addend0 = AddOp.getOperand(0);
600 Addend1 = AddOp.getOperand(1);
601 return true;
602 }
603 if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
604 // add(add(mul(x,y),a),b)
605 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
606 return false;
607 Mul0 = AddOp.getOperand(0).getOperand(0);
608 Mul1 = AddOp.getOperand(0).getOperand(1);
609 Addend0 = AddOp.getOperand(1);
610 Addend1 = OtherOp;
611 return true;
612 }
613 if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
614 // add(add(a,mul(x,y)),b)
615 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
616 return false;
617 Mul0 = AddOp.getOperand(1).getOperand(0);
618 Mul1 = AddOp.getOperand(1).getOperand(1);
619 Addend0 = AddOp.getOperand(0);
620 Addend1 = OtherOp;
621 return true;
622 }
623 return false;
624}
625
SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  // Try to expand an i64 add whose operand is an i64 multiply into a
  // multiply-accumulate sequence over 32-bit halves.
  // NOTE(review): four lines are missing from this extract (original lines
  // 630, 656, 664, 675): the declaration of Other and the three node-creation
  // lines for Hi — presumably XCoreISD::MACCU / MACCS / LMUL; confirm.
  SDValue Mul;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    // Neither operand is a multiply — nothing to do here.
    return SDValue();
  }
  SDLoc dl(N);
  // Split the multiply inputs and the addend into 32-bit halves.
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  // General case: combine the low product with cross products of the high
  // halves for the high word.
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
685
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  // Expand an i64 add/sub into two chained 32-bit LADD/LSUB nodes, threading
  // the carry/borrow from the low pair into the high pair.
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        "Unknown operand to lower!");

  // Prefer a multiply-accumulate form when one operand is a multiply.
  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  // NOTE(review): the second half of this ternary is missing from this
  // extract (original line 715) — presumably XCoreISD::LSUB.
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  // Result 1 is the carry/borrow out of the low words.
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
727
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Load the current vararg pointer, advance it past this argument, store it
  // back, then load the argument itself.
  // Whist llvm does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer
  // NOTE(review): the trailing argument of this getStore call is missing
  // from this extract (original line 748).
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}
752
SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  // NOTE(review): the lines declaring MF/XFI and the trailing argument of
  // the getStore call are missing from this extract (original lines 759-760
  // and 763).
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
}
765
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This nodes represent llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  // NOTE(review): the line declaring MF (the MachineFunction) is missing
  // from this extract (original line 776).
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  // Depth 0: simply copy the frame register.
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}
781
SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This nodes represent llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  // NOTE(review): the lines declaring MF/XFI and the trailing argument of
  // the getLoad call are missing from this extract (original lines 792-793
  // and 798).  The return address is read from the link-register spill slot.
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
}
800
801SDValue XCoreTargetLowering::
802LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
803 // This node represents offset from frame pointer to first on-stack argument.
804 // This is needed for correct stack adjustment during unwind.
805 // However, we don't know the offset until after the frame has be finalised.
806 // This is done during the XCoreFTAOElim pass.
807 return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
808}
809
SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents 'eh_return' gcc dwarf builtin, which is used to
  // return from exception. The general meaning is: adjust stack by OFFSET and
  // pass execution to HANDLER.
  // NOTE(review): the lines declaring MF and starting the Stack
  // getCopyFromReg call are missing from this extract (original lines 816
  // and 824).
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
                                        RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));

}
848
849SDValue XCoreTargetLowering::
850LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
851 return Op.getOperand(0);
852}
853
854SDValue XCoreTargetLowering::
855LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
856 SDValue Chain = Op.getOperand(0);
857 SDValue Trmp = Op.getOperand(1); // trampoline
858 SDValue FPtr = Op.getOperand(2); // nested function
859 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
860
861 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
862
863 // .align 4
864 // LDAPF_u10 r11, nest
865 // LDW_2rus r11, r11[0]
866 // STWSP_ru6 r11, sp[0]
867 // LDAPF_u10 r11, fptr
868 // LDW_2rus r11, r11[0]
869 // BAU_1r r11
870 // nest:
871 // .word nest
872 // fptr:
873 // .word fptr
874 SDValue OutChains[5];
875
876 SDValue Addr = Trmp;
877
878 SDLoc dl(Op);
879 OutChains[0] =
880 DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
881 MachinePointerInfo(TrmpAddr));
882
883 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
884 DAG.getConstant(4, dl, MVT::i32));
885 OutChains[1] =
886 DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
887 MachinePointerInfo(TrmpAddr, 4));
888
889 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
890 DAG.getConstant(8, dl, MVT::i32));
891 OutChains[2] =
892 DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
893 MachinePointerInfo(TrmpAddr, 8));
894
895 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
896 DAG.getConstant(12, dl, MVT::i32));
897 OutChains[3] =
898 DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
899
900 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
901 DAG.getConstant(16, dl, MVT::i32));
902 OutChains[4] =
903 DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
904
905 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
906}
907
908SDValue XCoreTargetLowering::
909LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
910 SDLoc DL(Op);
911 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
912 switch (IntNo) {
913 case Intrinsic::xcore_crc8:
914 EVT VT = Op.getValueType();
915 SDValue Data =
916 DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
917 Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
918 SDValue Crc(Data.getNode(), 1);
919 SDValue Results[] = { Crc, Data };
920 return DAG.getMergeValues(Results, DL);
921 }
922 return SDValue();
923}
924
925SDValue XCoreTargetLowering::
926LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
927 SDLoc DL(Op);
928 return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
929}
930
931SDValue XCoreTargetLowering::
932LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
933 AtomicSDNode *N = cast<AtomicSDNode>(Op);
934 assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
935 assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
936 N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
937 "shouldInsertFencesForAtomic(true) expects unordered / monotonic");
938 if (N->getMemoryVT() == MVT::i32) {
939 if (N->getAlign() < Align(4))
940 report_fatal_error("atomic load must be aligned");
941 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
942 N->getChain(), N->getBasePtr(), N->getPointerInfo(),
943 N->getAlign(), N->getMemOperand()->getFlags(),
944 N->getAAInfo(), N->getRanges());
945 }
946 if (N->getMemoryVT() == MVT::i16) {
947 if (N->getAlign() < Align(2))
948 report_fatal_error("atomic load must be aligned");
949 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
950 N->getBasePtr(), N->getPointerInfo(), MVT::i16,
951 N->getAlign(), N->getMemOperand()->getFlags(),
952 N->getAAInfo());
953 }
954 if (N->getMemoryVT() == MVT::i8)
955 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
956 N->getBasePtr(), N->getPointerInfo(), MVT::i8,
957 N->getAlign(), N->getMemOperand()->getFlags(),
958 N->getAAInfo());
959 return SDValue();
960}
961
962SDValue XCoreTargetLowering::
963LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
964 AtomicSDNode *N = cast<AtomicSDNode>(Op);
965 assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
966 assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
967 N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
968 "shouldInsertFencesForAtomic(true) expects unordered / monotonic");
969 if (N->getMemoryVT() == MVT::i32) {
970 if (N->getAlign() < Align(4))
971 report_fatal_error("atomic store must be aligned");
972 return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
973 N->getPointerInfo(), N->getAlign(),
974 N->getMemOperand()->getFlags(), N->getAAInfo());
975 }
976 if (N->getMemoryVT() == MVT::i16) {
977 if (N->getAlign() < Align(2))
978 report_fatal_error("atomic store must be aligned");
979 return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
980 N->getBasePtr(), N->getPointerInfo(), MVT::i16,
981 N->getAlign(), N->getMemOperand()->getFlags(),
982 N->getAAInfo());
983 }
984 if (N->getMemoryVT() == MVT::i8)
985 return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
986 N->getBasePtr(), N->getPointerInfo(), MVT::i8,
987 N->getAlign(), N->getMemOperand()->getFlags(),
988 N->getAAInfo());
989 return SDValue();
990}
991
// Forces volatile-style treatment of memory operands for atomic accesses.
// NOTE(review): doxygen-extraction artifact — the return-type line (992) and
// the bodies of the inner ifs (lines 1000/1003/1006/1009-1010, presumably
// `return MachineMemOperand::MOVolatile;` plus the fallthrough return) were
// dropped from this listing; confirm against the upstream file.
993XCoreTargetLowering::getTargetMMOFlags(const Instruction &I) const {
 994 // Because of how we convert atomic_load and atomic_store to normal loads and
 995 // stores in the DAG, we need to ensure that the MMOs are marked volatile
 996 // since DAGCombine hasn't been updated to account for atomic, but non
 997 // volatile loads. (See D57601)
 998 if (auto *SI = dyn_cast<StoreInst>(&I))
 999 if (SI->isAtomic())
1001 if (auto *LI = dyn_cast<LoadInst>(&I))
1002 if (LI->isAtomic())
1004 if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
1005 if (AI->isAtomic())
1007 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
1008 if (AI->isAtomic())
1011}
1012
1013//===----------------------------------------------------------------------===//
1014// Calling Convention Implementation
1015//===----------------------------------------------------------------------===//
1016
1017#include "XCoreGenCallingConv.inc"
1018
1019//===----------------------------------------------------------------------===//
1020// Call Calling Convention Implementation
1021//===----------------------------------------------------------------------===//
1022
1023/// XCore call implementation
// Dispatches call lowering: only the C and Fast calling conventions are
// supported, and tail calls are always disabled.
// NOTE(review): doxygen-extraction artifact — lines 1029 and 1031 were
// dropped from this listing; the body uses `Outs` and `Ins` below, so the
// dropped lines presumably bound them from CLI (confirm upstream).
1024SDValue
1025XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1026 SmallVectorImpl<SDValue> &InVals) const {
1027 SelectionDAG &DAG = CLI.DAG;
1028 SDLoc &dl = CLI.DL;
1030 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1032 SDValue Chain = CLI.Chain;
1033 SDValue Callee = CLI.Callee;
1034 bool &isTailCall = CLI.IsTailCall;
1035 CallingConv::ID CallConv = CLI.CallConv;
1036 bool isVarArg = CLI.IsVarArg;
1037
1038 // XCore target does not yet support tail call optimization.
1039 isTailCall = false;
1040
1041 // For now, only CallingConv::C implemented
1042 switch (CallConv)
1043 {
1044 default:
1045 report_fatal_error("Unsupported calling convention");
1046 case CallingConv::Fast:
1047 case CallingConv::C:
1048 return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1049 Outs, OutVals, Ins, dl, DAG, InVals);
1050 }
1051}
1052
1053/// LowerCallResult - Lower the result values of a call into the
1054/// appropriate copies out of appropriate physical registers / memory locations.
// NOTE(review): doxygen-extraction artifact — line 1055 (the opening of the
// signature) was dropped; per the declaration index later in this listing it
// is `static SDValue LowerCallResult(SDValue Chain, SDValue InGlue,`.
1056 const SmallVectorImpl<CCValAssign> &RVLocs,
1057 const SDLoc &dl, SelectionDAG &DAG,
1058 SmallVectorImpl<SDValue> &InVals) {
// Memory-located results are recorded as (stack offset, InVals slot) and
// filled in by the second loop below.
1059 SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1060 // Copy results out of physical registers.
1061 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1062 const CCValAssign &VA = RVLocs[i];
1063 if (VA.isRegLoc()) {
// getCopyFromReg produces (value, chain, glue); value 1 is the new chain
// and value 2 the outgoing glue threaded to the next copy.
1064 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1065 InGlue).getValue(1);
1066 InGlue = Chain.getValue(2);
1067 InVals.push_back(Chain.getValue(0));
1068 } else {
1069 assert(VA.isMemLoc());
1070 ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1071 InVals.size()));
1072 // Reserve space for this result.
1073 InVals.push_back(SDValue());
1074 }
1075 }
1076
1077 // Copy results out of memory.
1078 SmallVector<SDValue, 4> MemOpChains;
1079 for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1080 int offset = ResultMemLocs[i].first;
1081 unsigned index = ResultMemLocs[i].second;
1082 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
// LDWSP takes a word offset, hence the division by 4.
1083 SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1084 SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1085 InVals[index] = load;
1086 MemOpChains.push_back(load.getValue(1));
1087 }
1088
1089 // Transform all loads nodes into one single node because
1090 // all load nodes are independent of each other.
1091 if (!MemOpChains.empty())
1092 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1093
1094 return Chain;
1095}
1096
1097/// LowerCCCCallTo - functions arguments are copied from virtual
1098/// regs to (physical regs)/(stack frame), CALLSEQ_START and
1099/// CALLSEQ_END are emitted.
1100/// TODO: isTailCall, sret.
// NOTE(review): doxygen-extraction artifact — lines 1109, 1119, 1131 and 1199
// were dropped from this listing; the body uses `ArgLocs`, `RVLocs`,
// `RegsToPass` and `Ops`, so the dropped lines are presumably their
// SmallVector declarations (confirm against the upstream file).
1101SDValue XCoreTargetLowering::LowerCCCCallTo(
1102 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1103 bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1104 const SmallVectorImpl<SDValue> &OutVals,
1105 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1106 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1107
1108 // Analyze operands of the call, assigning locations to each operand.
1110 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1111 *DAG.getContext());
1112
1113 // The ABI dictates there should be one stack slot available to the callee
1114 // on function entry (for saving lr).
1115 CCInfo.AllocateStack(4, Align(4));
1116
1117 CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1118
1120 // Analyze return values to determine the number of bytes of stack required.
1121 CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1122 *DAG.getContext());
1123 RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
1124 RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1125
1126 // Get a count of how many bytes are to be pushed on the stack.
1127 unsigned NumBytes = RetCCInfo.getStackSize();
1128
1129 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1130
1132 SmallVector<SDValue, 12> MemOpChains;
1133
1134 // Walk the register/memloc assignments, inserting copies/loads.
1135 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1136 CCValAssign &VA = ArgLocs[i];
1137 SDValue Arg = OutVals[i];
1138
1139 // Promote the value if needed.
1140 switch (VA.getLocInfo()) {
1141 default: llvm_unreachable("Unknown loc info!");
1142 case CCValAssign::Full: break;
1143 case CCValAssign::SExt:
1144 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1145 break;
1146 case CCValAssign::ZExt:
1147 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1148 break;
1149 case CCValAssign::AExt:
1150 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1151 break;
1152 }
1153
1154 // Arguments that can be passed on register must be kept at
1155 // RegsToPass vector
1156 if (VA.isRegLoc()) {
1157 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1158 } else {
1159 assert(VA.isMemLoc());
1160
1161 int Offset = VA.getLocMemOffset();
1162
// STWSP stores relative to the stack pointer using a word offset.
1163 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1164 Chain, Arg,
1165 DAG.getConstant(Offset/4, dl,
1166 MVT::i32)));
1167 }
1168 }
1169
1170 // Transform all store nodes into one single node because
1171 // all store nodes are independent of each other.
1172 if (!MemOpChains.empty())
1173 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1174
1175 // Build a sequence of copy-to-reg nodes chained together with token
1176 // chain and flag operands which copy the outgoing args into registers.
1177 // The InGlue in necessary since all emitted instructions must be
1178 // stuck together.
1179 SDValue InGlue;
1180 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1181 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1182 RegsToPass[i].second, InGlue);
1183 InGlue = Chain.getValue(1);
1184 }
1185
1186 // If the callee is a GlobalAddress node (quite common, every direct call is)
1187 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1188 // Likewise ExternalSymbol -> TargetExternalSymbol.
1189 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1190 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1191 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1192 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1193
1194 // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1195 // = Chain, Callee, Reg#1, Reg#2, ...
1196 //
1197 // Returns a chain & a flag for retval copy to use.
1198 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1200 Ops.push_back(Chain);
1201 Ops.push_back(Callee);
1202
1203 // Add argument registers to the end of the list so that they are
1204 // known live into the call.
1205 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1206 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1207 RegsToPass[i].second.getValueType()));
1208
1209 if (InGlue.getNode())
1210 Ops.push_back(InGlue);
1211
1212 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1213 InGlue = Chain.getValue(1);
1214
1215 // Create the CALLSEQ_END node.
1216 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
1217 InGlue = Chain.getValue(1);
1218
1219 // Handle result values, copying them out of physregs into vregs that we
1220 // return.
1221 return LowerCallResult(Chain, InGlue, RVLocs, dl, DAG, InVals);
1222}
1223
1224//===----------------------------------------------------------------------===//
1225// Formal Arguments Calling Convention Implementation
1226//===----------------------------------------------------------------------===//
1227
1228namespace {
1229 struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1230}
1231
1232/// XCore formal arguments implementation
1233SDValue XCoreTargetLowering::LowerFormalArguments(
1234 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1235 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1236 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1237 switch (CallConv)
1238 {
1239 default:
1240 report_fatal_error("Unsupported calling convention");
1241 case CallingConv::C:
1242 case CallingConv::Fast:
1243 return LowerCCCArguments(Chain, CallConv, isVarArg,
1244 Ins, dl, DAG, InVals);
1245 }
1246}
1247
1248/// LowerCCCArguments - transform physical registers into
1249/// virtual registers and generate load operations for
1250/// arguments places on the stack.
1251/// TODO: sret
// NOTE(review): doxygen-extraction artifact — lines 1256, 1258-1259, 1262,
// 1283-1284, 1328, 1340, 1366 and 1391 were dropped from this listing. The
// body references `MF`, `RegInfo`, `XFI`, `ArgLocs`, `ArgData`, `MemOps`, a
// load pointer-info operand and the vararg frame-index setter, so the dropped
// lines presumably declare/complete those (confirm against upstream).
1252SDValue XCoreTargetLowering::LowerCCCArguments(
1253 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1254 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1255 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1257 MachineFrameInfo &MFI = MF.getFrameInfo();
1260
1261 // Assign locations to all of the incoming arguments.
1263 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1264 *DAG.getContext());
1265
1266 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1267
1268 unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1269
// One slot is always reserved for the saved link register.
1270 unsigned LRSaveSize = StackSlotSize;
1271
1272 if (!isVarArg)
1273 XFI->setReturnStackOffset(CCInfo.getStackSize() + LRSaveSize);
1274
1275 // All getCopyFromReg ops must precede any getMemcpys to prevent the
1276 // scheduler clobbering a register before it has been copied.
1277 // The stages are:
1278 // 1. CopyFromReg (and load) arg & vararg registers.
1279 // 2. Chain CopyFromReg nodes into a TokenFactor.
1280 // 3. Memcpy 'byVal' args & push final InVals.
1281 // 4. Chain mem ops nodes into a TokenFactor.
1282 SmallVector<SDValue, 4> CFRegNode;
1285
1286 // 1a. CopyFromReg (and load) arg registers.
1287 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1288
1289 CCValAssign &VA = ArgLocs[i];
1290 SDValue ArgIn;
1291
1292 if (VA.isRegLoc()) {
1293 // Arguments passed in registers
1294 EVT RegVT = VA.getLocVT();
1295 switch (RegVT.getSimpleVT().SimpleTy) {
1296 default:
1297 {
1298#ifndef NDEBUG
1299 errs() << "LowerFormalArguments Unhandled argument type: "
1300 << RegVT << "\n";
1301#endif
1302 llvm_unreachable(nullptr);
1303 }
1304 case MVT::i32:
1305 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1306 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1307 ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
// The last result of a CopyFromReg is its chain/glue; record it so all
// copies can be merged into one TokenFactor in stage 2.
1308 CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1309 }
1310 } else {
1311 // Only arguments passed on the stack should make it here.
1312 assert(VA.isMemLoc());
1313 // Load the argument to a virtual register
1314 unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1315 if (ObjSize > StackSlotSize) {
1316 errs() << "LowerFormalArguments Unhandled argument type: "
1317 << VA.getLocVT() << "\n";
1318 }
1319 // Create the frame index object for this incoming parameter...
1320 int FI = MFI.CreateFixedObject(ObjSize,
1321 LRSaveSize + VA.getLocMemOffset(),
1322 true);
1323
1324 // Create the SelectionDAG nodes corresponding to a load
1325 //from this parameter
1326 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1327 ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1329 }
1330 const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1331 ArgData.push_back(ADP);
1332 }
1333
1334 // 1b. CopyFromReg vararg registers.
1335 if (isVarArg) {
1336 // Argument registers
1337 static const MCPhysReg ArgRegs[] = {
1338 XCore::R0, XCore::R1, XCore::R2, XCore::R3
1339 };
1341 unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1342 if (FirstVAReg < std::size(ArgRegs)) {
1343 int offset = 0;
1344 // Save remaining registers, storing higher register numbers at a higher
1345 // address
1346 for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1347 // Create a stack slot
1348 int FI = MFI.CreateFixedObject(4, offset, true);
1349 if (i == (int)FirstVAReg) {
1350 XFI->setVarArgsFrameIndex(FI);
1351 }
1352 offset -= StackSlotSize;
1353 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1354 // Move argument from phys reg -> virt reg
1355 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1356 RegInfo.addLiveIn(ArgRegs[i], VReg);
1357 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1358 CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1359 // Move argument from virt reg -> stack
1360 SDValue Store =
1361 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1362 MemOps.push_back(Store);
1363 }
1364 } else {
1365 // This will point to the next argument passed via stack.
1367 MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));
1368 }
1369 }
1370
1371 // 2. chain CopyFromReg nodes into a TokenFactor.
1372 if (!CFRegNode.empty())
1373 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1374
1375 // 3. Memcpy 'byVal' args & push final InVals.
1376 // Aggregates passed "byVal" need to be copied by the callee.
1377 // The callee will use a pointer to this copy, rather than the original
1378 // pointer.
1379 for (const ArgDataPair &ArgDI : ArgData) {
1380 if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
1381 unsigned Size = ArgDI.Flags.getByValSize();
1382 Align Alignment =
1383 std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
1384 // Create a new object on the stack and copy the pointee into it.
1385 int FI = MFI.CreateStackObject(Size, Alignment, false);
1386 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1387 InVals.push_back(FIN);
1388 MemOps.push_back(DAG.getMemcpy(
1389 Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
1390 Alignment, false, false, false, MachinePointerInfo(),
1392 } else {
1393 InVals.push_back(ArgDI.SDV);
1394 }
1395 }
1396
1397 // 4, chain mem ops nodes into a TokenFactor.
1398 if (!MemOps.empty()) {
1399 MemOps.push_back(Chain);
1400 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1401 }
1402
1403 return Chain;
1404}
1405
1406//===----------------------------------------------------------------------===//
1407// Return Value Calling Convention Implementation
1408//===----------------------------------------------------------------------===//
1409
// Returns true if the given return values can be lowered: they must satisfy
// RetCC_XCore, and a vararg function may not return anything on the stack.
// NOTE(review): doxygen-extraction artifact — lines 1413 and 1415 were
// dropped from this listing; the body uses `Outs` and `RVLocs`, so they are
// presumably the Outs parameter and the RVLocs declaration (confirm upstream).
1410bool XCoreTargetLowering::
1411CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1412 bool isVarArg,
1414 LLVMContext &Context) const {
1416 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1417 if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1418 return false;
1419 if (CCInfo.getStackSize() != 0 && isVarArg)
1420 return false;
1421 return true;
1422}
1423
// Lowers a function return: memory-located results are stored into fixed
// frame objects, register results become glued CopyToRegs, and the function
// ends with a RETSP node ("retsp 0").
// NOTE(review): doxygen-extraction artifact — lines 1427, 1432-1433, 1437 and
// 1476 were dropped from this listing; the body uses `Outs`, the XFI
// initializer, `RVLocs`, `MFI` and a store pointer-info operand, so the
// dropped lines presumably supply those (confirm against upstream).
1424SDValue
1425XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1426 bool isVarArg,
1428 const SmallVectorImpl<SDValue> &OutVals,
1429 const SDLoc &dl, SelectionDAG &DAG) const {
1430
1431 XCoreFunctionInfo *XFI =
1434
1435 // CCValAssign - represent the assignment of
1436 // the return value to a location
1438
1439 // CCState - Info about the registers and stack slot.
1440 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1441 *DAG.getContext());
1442
1443 // Analyze return values.
1444 if (!isVarArg)
1445 CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));
1446
1447 CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1448
1449 SDValue Glue;
1450 SmallVector<SDValue, 4> RetOps(1, Chain);
1451
1452 // Return on XCore is always a "retsp 0"
1453 RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1454
1455 SmallVector<SDValue, 4> MemOpChains;
1456 // Handle return values that must be copied to memory.
1457 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1458 CCValAssign &VA = RVLocs[i];
1459 if (VA.isRegLoc())
1460 continue;
1461 assert(VA.isMemLoc());
1462 if (isVarArg) {
1463 report_fatal_error("Can't return value from vararg function in memory");
1464 }
1465
1466 int Offset = VA.getLocMemOffset();
1467 unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1468 // Create the frame index object for the memory location.
1469 int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1470
1471 // Create a SelectionDAG node corresponding to a store
1472 // to this memory location.
1473 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1474 MemOpChains.push_back(DAG.getStore(
1475 Chain, dl, OutVals[i], FIN,
1477 }
1478
1479 // Transform all store nodes into one single node because
1480 // all stores are independent of each other.
1481 if (!MemOpChains.empty())
1482 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1483
1484 // Now handle return values copied to registers.
1485 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1486 CCValAssign &VA = RVLocs[i];
1487 if (!VA.isRegLoc())
1488 continue;
1489 // Copy the result values into the output registers.
1490 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1491
1492 // guarantee that all emitted copies are
1493 // stuck together, avoiding something bad
1494 Glue = Chain.getValue(1);
1495 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1496 }
1497
1498 RetOps[0] = Chain; // Update chain.
1499
1500 // Add the glue if we have it.
1501 if (Glue.getNode())
1502 RetOps.push_back(Glue);
1503
1504 return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1505}
1506
1507//===----------------------------------------------------------------------===//
1508// Other Lowering Code
1509//===----------------------------------------------------------------------===//
1510
// Expands the SELECT_CC pseudo into a diamond control-flow pattern with a
// conditional branch (BRFT_lru6), a false-value fallthrough block, and a PHI
// in the sink block.
// NOTE(review): doxygen-extraction artifact — lines 1511-1512 (the return
// type and signature head, presumably
// `MachineBasicBlock *XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,`),
// line 1524 (the `It` insertion-iterator initializer) and line 1542
// (presumably the transfer of BB's successors to sinkMBB) were dropped from
// this listing; confirm against the upstream file.
1513 MachineBasicBlock *BB) const {
1514 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1515 DebugLoc dl = MI.getDebugLoc();
1516 assert((MI.getOpcode() == XCore::SELECT_CC) &&
1517 "Unexpected instr type to insert");
1518
1519 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1520 // control-flow pattern. The incoming instruction knows the destination vreg
1521 // to set, the condition code register to branch on, the true/false values to
1522 // select between, and a branch opcode to use.
1523 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1525
1526 // thisMBB:
1527 // ...
1528 // TrueVal = ...
1529 // cmpTY ccX, r1, r2
1530 // bCC copy1MBB
1531 // fallthrough --> copy0MBB
1532 MachineBasicBlock *thisMBB = BB;
1533 MachineFunction *F = BB->getParent();
1534 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1535 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1536 F->insert(It, copy0MBB);
1537 F->insert(It, sinkMBB);
1538
1539 // Transfer the remainder of BB and its successor edges to sinkMBB.
1540 sinkMBB->splice(sinkMBB->begin(), BB,
1541 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1543
1544 // Next, add the true and fallthrough blocks as its successors.
1545 BB->addSuccessor(copy0MBB);
1546 BB->addSuccessor(sinkMBB);
1547
1548 BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1549 .addReg(MI.getOperand(1).getReg())
1550 .addMBB(sinkMBB);
1551
1552 // copy0MBB:
1553 // %FalseValue = ...
1554 // # fallthrough to sinkMBB
1555 BB = copy0MBB;
1556
1557 // Update machine-CFG edges
1558 BB->addSuccessor(sinkMBB);
1559
1560 // sinkMBB:
1561 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1562 // ...
1563 BB = sinkMBB;
1564 BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1565 .addReg(MI.getOperand(3).getReg())
1566 .addMBB(copy0MBB)
1567 .addReg(MI.getOperand(2).getReg())
1568 .addMBB(thisMBB);
1569
1570 MI.eraseFromParent(); // The pseudo instruction is gone now.
1571 return BB;
1572}
1573
1574//===----------------------------------------------------------------------===//
1575// Target Optimization Hooks
1576//===----------------------------------------------------------------------===//
1577
// Target-specific DAG combines: narrows demanded bits for port/timer
// intrinsic operands, folds LADD/LSUB/LMUL with zero operands, forms LMUL
// from add-of-mul patterns, and turns unaligned load/store pairs into a
// memmove.
// NOTE(review): doxygen-extraction artifact — lines 1584 (presumably the
// `case ISD::INTRINSIC_VOID:` label), 1646/1668/1683 (presumably the `Mask`
// APInt declarations each fold tests below), 1772 (the alignment-check call
// head) and 1789 (presumably the word-alignment condition) were dropped from
// this listing; confirm against the upstream file.
1578SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1579 DAGCombinerInfo &DCI) const {
1580 SelectionDAG &DAG = DCI.DAG;
1581 SDLoc dl(N);
1582 switch (N->getOpcode()) {
1583 default: break;
1585 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1586 case Intrinsic::xcore_outt:
1587 case Intrinsic::xcore_outct:
1588 case Intrinsic::xcore_chkct: {
1589 SDValue OutVal = N->getOperand(3);
1590 // These instructions ignore the high bits.
1591 if (OutVal.hasOneUse()) {
1592 unsigned BitWidth = OutVal.getValueSizeInBits();
1593 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1594 KnownBits Known;
1595 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1596 !DCI.isBeforeLegalizeOps());
1597 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1598 if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1599 TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1600 DCI.CommitTargetLoweringOpt(TLO);
1601 }
1602 break;
1603 }
1604 case Intrinsic::xcore_setpt: {
1605 SDValue Time = N->getOperand(3);
1606 // This instruction ignores the high bits.
1607 if (Time.hasOneUse()) {
1608 unsigned BitWidth = Time.getValueSizeInBits();
1609 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1610 KnownBits Known;
1611 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1612 !DCI.isBeforeLegalizeOps());
1613 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1614 if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1615 TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1616 DCI.CommitTargetLoweringOpt(TLO);
1617 }
1618 break;
1619 }
1620 }
1621 break;
1622 case XCoreISD::LADD: {
1623 SDValue N0 = N->getOperand(0);
1624 SDValue N1 = N->getOperand(1);
1625 SDValue N2 = N->getOperand(2);
1626 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1627 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1628 EVT VT = N0.getValueType();
1629
1630 // canonicalize constant to RHS
1631 if (N0C && !N1C)
1632 return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1633
1634 // fold (ladd 0, 0, x) -> 0, x & 1
1635 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1636 SDValue Carry = DAG.getConstant(0, dl, VT);
1637 SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1638 DAG.getConstant(1, dl, VT));
1639 SDValue Ops[] = { Result, Carry };
1640 return DAG.getMergeValues(Ops, dl);
1641 }
1642
1643 // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1644 // low bit set
1645 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1647 VT.getSizeInBits() - 1);
1648 KnownBits Known = DAG.computeKnownBits(N2);
1649 if ((Known.Zero & Mask) == Mask) {
1650 SDValue Carry = DAG.getConstant(0, dl, VT);
1651 SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1652 SDValue Ops[] = { Result, Carry };
1653 return DAG.getMergeValues(Ops, dl);
1654 }
1655 }
1656 }
1657 break;
1658 case XCoreISD::LSUB: {
1659 SDValue N0 = N->getOperand(0);
1660 SDValue N1 = N->getOperand(1);
1661 SDValue N2 = N->getOperand(2);
1662 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1663 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1664 EVT VT = N0.getValueType();
1665
1666 // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1667 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1669 VT.getSizeInBits() - 1);
1670 KnownBits Known = DAG.computeKnownBits(N2);
1671 if ((Known.Zero & Mask) == Mask) {
1672 SDValue Borrow = N2;
1673 SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1674 DAG.getConstant(0, dl, VT), N2);
1675 SDValue Ops[] = { Result, Borrow };
1676 return DAG.getMergeValues(Ops, dl);
1677 }
1678 }
1679
1680 // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1681 // low bit set
1682 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1684 VT.getSizeInBits() - 1);
1685 KnownBits Known = DAG.computeKnownBits(N2);
1686 if ((Known.Zero & Mask) == Mask) {
1687 SDValue Borrow = DAG.getConstant(0, dl, VT);
1688 SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1689 SDValue Ops[] = { Result, Borrow };
1690 return DAG.getMergeValues(Ops, dl);
1691 }
1692 }
1693 }
1694 break;
1695 case XCoreISD::LMUL: {
1696 SDValue N0 = N->getOperand(0);
1697 SDValue N1 = N->getOperand(1);
1698 SDValue N2 = N->getOperand(2);
1699 SDValue N3 = N->getOperand(3);
1700 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1701 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1702 EVT VT = N0.getValueType();
1703 // Canonicalize multiplicative constant to RHS. If both multiplicative
1704 // operands are constant canonicalize smallest to RHS.
1705 if ((N0C && !N1C) ||
1706 (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1707 return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1708 N1, N0, N2, N3);
1709
1710 // lmul(x, 0, a, b)
1711 if (N1C && N1C->isZero()) {
1712 // If the high result is unused fold to add(a, b)
1713 if (N->hasNUsesOfValue(0, 0)) {
1714 SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1715 SDValue Ops[] = { Lo, Lo };
1716 return DAG.getMergeValues(Ops, dl);
1717 }
1718 // Otherwise fold to ladd(a, b, 0)
1719 SDValue Result =
1720 DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1721 SDValue Carry(Result.getNode(), 1);
1722 SDValue Ops[] = { Carry, Result };
1723 return DAG.getMergeValues(Ops, dl);
1724 }
1725 }
1726 break;
1727 case ISD::ADD: {
1728 // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1729 // lmul(x, y, a, b). The high result of lmul will be ignored.
1730 // This is only profitable if the intermediate results are unused
1731 // elsewhere.
1732 SDValue Mul0, Mul1, Addend0, Addend1;
1733 if (N->getValueType(0) == MVT::i32 &&
1734 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1735 SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1736 DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1737 Mul1, Addend0, Addend1);
1738 SDValue Result(Ignored.getNode(), 1);
1739 return Result;
1740 }
1741 APInt HighMask = APInt::getHighBitsSet(64, 32);
1742 // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1743 // lmul(x, y, a, b) if all operands are zero-extended. We do this
1744 // before type legalization as it is messy to match the operands after
1745 // that.
1746 if (N->getValueType(0) == MVT::i64 &&
1747 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1748 DAG.MaskedValueIsZero(Mul0, HighMask) &&
1749 DAG.MaskedValueIsZero(Mul1, HighMask) &&
1750 DAG.MaskedValueIsZero(Addend0, HighMask) &&
1751 DAG.MaskedValueIsZero(Addend1, HighMask)) {
1752 SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1753 Mul0, DAG.getConstant(0, dl, MVT::i32));
1754 SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1755 Mul1, DAG.getConstant(0, dl, MVT::i32));
1756 SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1757 Addend0, DAG.getConstant(0, dl, MVT::i32));
1758 SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1759 Addend1, DAG.getConstant(0, dl, MVT::i32));
1760 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1761 DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1762 Addend0L, Addend1L);
1763 SDValue Lo(Hi.getNode(), 1);
1764 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1765 }
1766 }
1767 break;
1768 case ISD::STORE: {
1769 // Replace unaligned store of unaligned load with memmove.
1770 StoreSDNode *ST = cast<StoreSDNode>(N);
1771 if (!DCI.isBeforeLegalize() ||
1773 ST->getMemoryVT(),
1774 *ST->getMemOperand()) ||
1775 ST->isVolatile() || ST->isIndexed()) {
1776 break;
1777 }
1778 SDValue Chain = ST->getChain();
1779
1780 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1781 assert((StoreBits % 8) == 0 &&
1782 "Store size in bits must be a multiple of 8");
1783 Align Alignment = ST->getAlign();
1784
1785 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1786 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1787 LD->getAlign() == Alignment &&
1788 !LD->isVolatile() && !LD->isIndexed() &&
1790 bool isTail = isInTailCallPosition(DAG, ST, Chain);
1791 return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
1792 DAG.getConstant(StoreBits / 8, dl, MVT::i32),
1793 Alignment, false, isTail,
1794 ST->getPointerInfo(), LD->getPointerInfo());
1795 }
1796 }
1797 break;
1798 }
1799 }
1800 return SDValue();
1801}
1802
// Reports known-zero high bits for XCore-specific nodes: carry/borrow results
// of LADD/LSUB and the results of several port/timer intrinsics.
// NOTE(review): doxygen-extraction artifact — line 1819 was dropped from this
// listing; given the IntNo switch that follows it is presumably the
// `case ISD::INTRINSIC_W_CHAIN:` label (confirm against upstream).
1803void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1804 KnownBits &Known,
1805 const APInt &DemandedElts,
1806 const SelectionDAG &DAG,
1807 unsigned Depth) const {
1808 Known.resetAll();
1809 switch (Op.getOpcode()) {
1810 default: break;
1811 case XCoreISD::LADD:
1812 case XCoreISD::LSUB:
1813 if (Op.getResNo() == 1) {
1814 // Top bits of carry / borrow are clear.
1815 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1816 Known.getBitWidth() - 1);
1817 }
1818 break;
1820 {
1821 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1822 switch (IntNo) {
1823 case Intrinsic::xcore_getts:
1824 // High bits are known to be zero.
1825 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1826 Known.getBitWidth() - 16);
1827 break;
1828 case Intrinsic::xcore_int:
1829 case Intrinsic::xcore_inct:
1830 // High bits are known to be zero.
1831 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1832 Known.getBitWidth() - 8);
1833 break;
1834 case Intrinsic::xcore_testct:
1835 // Result is either 0 or 1.
1836 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1837 Known.getBitWidth() - 1);
1838 break;
1839 case Intrinsic::xcore_testwct:
1840 // Result is in the range 0 - 4.
1841 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1842 Known.getBitWidth() - 3);
1843 break;
1844 }
1845 }
1846 break;
1847 }
1848}
1849
1850//===----------------------------------------------------------------------===//
1851// Addressing mode description hooks
1852//===----------------------------------------------------------------------===//
1853
/// isImmUs - Return true if \p val fits in the XCore "us" immediate field,
/// i.e. lies in the unsigned range [0, 11].
static inline bool isImmUs(int64_t val) { return 0 <= val && val <= 11; }
1858
1859static inline bool isImmUs2(int64_t val)
1860{
1861 return (val%2 == 0 && isImmUs(val/2));
1862}
1863
1864static inline bool isImmUs4(int64_t val)
1865{
1866 return (val%4 == 0 && isImmUs(val/4));
1867}
1868
1869/// isLegalAddressingMode - Return true if the addressing mode represented
1870/// by AM is legal for this target, for a load/store of the specified type.
// NOTE(review): doxygen-extraction artifact — line 1871 (the signature head,
// presumably `bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,`)
// was dropped from this listing; confirm against the upstream file.
1872 const AddrMode &AM, Type *Ty,
1873 unsigned AS,
1874 Instruction *I) const {
// Void access type: only an unscaled, word-aligned "us" offset is legal.
1875 if (Ty->getTypeID() == Type::VoidTyID)
1876 return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1877
1878 unsigned Size = DL.getTypeAllocSize(Ty);
1879 if (AM.BaseGV) {
// Global-relative addressing: word-sized (or larger) access, no base
// register or scale, word-aligned offset.
1880 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1881 AM.BaseOffs%4 == 0;
1882 }
1883
// Otherwise legality depends on the access size: byte, half-word, or word.
1884 switch (Size) {
1885 case 1:
1886 // reg + imm
1887 if (AM.Scale == 0) {
1888 return isImmUs(AM.BaseOffs);
1889 }
1890 // reg + reg
1891 return AM.Scale == 1 && AM.BaseOffs == 0;
1892 case 2:
1893 case 3:
1894 // reg + imm
1895 if (AM.Scale == 0) {
1896 return isImmUs2(AM.BaseOffs);
1897 }
1898 // reg + reg<<1
1899 return AM.Scale == 2 && AM.BaseOffs == 0;
1900 default:
1901 // reg + imm
1902 if (AM.Scale == 0) {
1903 return isImmUs4(AM.BaseOffs);
1904 }
1905 // reg + reg<<2
1906 return AM.Scale == 4 && AM.BaseOffs == 0;
1907 }
1908}
1909
1910//===----------------------------------------------------------------------===//
1911// XCore Inline Assembly Support
1912//===----------------------------------------------------------------------===//
1913
1914std::pair<unsigned, const TargetRegisterClass *>
1915XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1916 StringRef Constraint,
1917 MVT VT) const {
1918 if (Constraint.size() == 1) {
1919 switch (Constraint[0]) {
1920 default : break;
1921 case 'r':
1922 return std::make_pair(0U, &XCore::GRRegsRegClass);
1923 }
1924 }
1925 // Use the default implementation in TargetLowering to convert the register
1926 // constraint into a member of a register class.
1927 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1928}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
uint64_t High
LLVMContext & Context
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
static bool isImmUs(int64_t val)
static bool isImmUs4(int64_t val)
static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL)
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
static bool isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0, SDValue &Addend1, bool requireIntermediatesHaveOneUse)
isADDADDMUL - Return whether Op is in a form that is equivalent to add(add(mul(x,y),...
static bool isImmUs2(int64_t val)
static constexpr uint32_t Opcode
Definition: aarch32.h:200
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:274
This is an SDNode representing atomic operations.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
The address of a basic block.
Definition: Constants.h:874
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
int64_t getLocMemOffset() const
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< unsigned > InRangeIndex=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1181
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
uint64_t getZExtValue() const
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:878
A debug info location.
Definition: DebugLoc.h:33
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
Definition: GlobalValue.h:523
StringRef getSection() const
Definition: Globals.cpp:173
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
bool hasSection() const
Definition: GlobalValue.h:286
Type * getValueType() const
Definition: GlobalValue.h:292
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
This class is used to represent ISD::LOAD nodes.
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:68
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:275
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:720
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:478
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:730
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:472
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getExternalSymbol(const char *Sym, EVT VT)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:771
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:766
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:469
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:797
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
Definition: SelectionDAG.h:485
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:737
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:554
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
bool startswith(StringRef Prefix) const
Definition: StringRef.h:261
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
@ VoidTyID
type with no size
Definition: Type.h:63
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
static IntegerType * getInt8Ty(LLVMContext &C)
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:246
static IntegerType * getInt32Ty(LLVMContext &C)
TypeID getTypeID() const
Return the type id for the type.
Definition: Type.h:137
LLVM Value Representation.
Definition: Value.h:74
Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition: Value.cpp:926
static int stackSlotSize()
Stack slot size (4 bytes)
XCoreFunctionInfo - This class is derived from MachineFunction private XCore target-specific informat...
void setReturnStackOffset(unsigned value)
int createLRSpillSlot(MachineFunction &MF)
const TargetRegisterInfo * getRegisterInfo() const override
const XCoreInstrInfo * getInstrInfo() const override
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
XCoreTargetLowering(const TargetMachine &TM, const XCoreSubtarget &Subtarget)
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName - This method returns the name of a target specific
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1124
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1120
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:250
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1153
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1233
@ FRAME_TO_ARGS_OFFSET
FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to first (possible) on-stack ar...
Definition: ISDOpcodes.h:124
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1029
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:783
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:199
@ RETURNADDR
Definition: ISDOpcodes.h:95
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
Definition: ISDOpcodes.h:1220
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1225
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:229
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
Definition: ISDOpcodes.h:1191
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition: ISDOpcodes.h:135
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:774
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1075
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1054
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1229
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition: ISDOpcodes.h:222
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1149
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:651
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:705
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:780
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:742
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1039
@ ConstantPool
Definition: ISDOpcodes.h:82
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:94
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:680
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1200
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:184
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1144
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:763
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
Definition: ISDOpcodes.h:1197
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:192
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:440
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
static const unsigned CodeModelLargeSize
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Other
Any other memory.
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:129
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:351
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:299
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:144
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:233
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:66
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...