178   if (!Subtarget.supportsMultiplication()) {
267   EVT VT = Op.getValueType();
270          "Expected power-of-2 shift amount");
273   if (!isa<ConstantSDNode>(N->getOperand(1))) {
278   SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
285   uint64_t ShiftAmount = N->getConstantOperandVal(1);
286   if (ShiftAmount == 16) {
291     switch (Op.getOpcode()) {
304     switch (Op.getOpcode()) {
317   SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
323   if (!isa<ConstantSDNode>(N->getOperand(1))) {
324     switch (Op.getOpcode()) {
334       SDValue Amt = N->getOperand(1);
335       EVT AmtVT = Amt.getValueType();
341       SDValue Amt = N->getOperand(1);
342       EVT AmtVT = Amt.getValueType();
353   uint64_t ShiftAmount = N->getConstantOperandVal(1);
354   SDValue Victim = N->getOperand(0);
356   switch (Op.getOpcode()) {
380   if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
386   } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
393   } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
398   } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
403   } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
408   } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
413   } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
419   } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
425   } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
430   } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
444   switch (ShiftAmount) {
463   if (4 <= ShiftAmount && ShiftAmount < 8)
464     switch (Op.getOpcode()) {
478   else if (8 <= ShiftAmount && ShiftAmount < 12)
479     switch (Op.getOpcode()) {
504   else if (12 <= ShiftAmount)
505     switch (Op.getOpcode()) {
532   while (ShiftAmount--) {
533     Victim = DAG.getNode(Opc8, dl, VT, Victim);
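// LowerDivRem (below): AVR has no hardware divide, so SDIVREM/UDIVREM are
// lowered to a runtime library call chosen through RTLIB (8/16/32-bit,
// signed or unsigned), which returns quotient and remainder as a pair.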
539 SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
540   unsigned Opcode = Op->getOpcode();
542          "Invalid opcode for Div/Rem lowering");
544   EVT VT = Op->getValueType(0);
545   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
548   switch (VT.getSimpleVT().SimpleTy) {
552     LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
555     LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
558     LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
562   SDValue InChain = DAG.getEntryNode();
565   TargetLowering::ArgListEntry Entry;
566   for (SDValue const &Value : Op->op_values()) {
568     Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
569     Entry.IsSExt = IsSigned;
570     Entry.IsZExt = !IsSigned;
571     Args.push_back(Entry);
580   TargetLowering::CallLoweringInfo CLI(DAG);
585       .setSExtResult(IsSigned)
586       .setZExtResult(!IsSigned);
588   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
589   return CallInfo.first;
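// LowerGlobalAddress / LowerBlockAddress: rebuild the address as a target
// global/block address node; the result is then wrapped in the target's
// wrapper node (the wrapping itself is outside this excerpt).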
592 SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
593                                               SelectionDAG &DAG) const {
594   auto DL = DAG.getDataLayout();
596   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
597   int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
605 SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
606                                              SelectionDAG &DAG) const {
607   auto DL = DAG.getDataLayout();
608   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
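// getAVRCmp: emit the AVR compare for an i8/i16 operand pair. A 16-bit
// compare against a constant is split into a low-byte CMP followed by a
// high-byte compare-with-carry (AVRISD::CMPC), so each half can use an
// immediate operand.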
636 SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
637                                      SelectionDAG &DAG, SDLoc DL) const {
638   assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
639          "LHS and RHS have different types");
640   assert(((LHS.getSimpleValueType() == MVT::i16) ||
641           (LHS.getSimpleValueType() == MVT::i8)) &&
642          "invalid comparison type");
646   if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
651                                 DAG.getIntPtrConstant(0, DL));
653                                 DAG.getIntPtrConstant(1, DL));
654     SDValue RHSlo = (Imm & 0xff) == 0
657                                       DAG.getIntPtrConstant(0, DL));
658     SDValue RHShi = (Imm & 0xff00) == 0
661                                       DAG.getIntPtrConstant(1, DL));
663     Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
664   } else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
668     SDValue LHSlo = (Imm & 0xff) == 0
671                                       DAG.getIntPtrConstant(0, DL));
672     SDValue LHShi = (Imm & 0xff00) == 0
675                                       DAG.getIntPtrConstant(1, DL));
677                                 DAG.getIntPtrConstant(0, DL));
679                                 DAG.getIntPtrConstant(1, DL));
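// The condition-code-aware getAVRCmp overload (its opening signature line is
// not part of this excerpt) first canonicalizes the ISD condition code:
// compares against selected constants are rewritten (for example by adding 1
// to the RHS), comparisons against zero can use the TST form, and i32/i64
// operands are split into halves that are chained through CMP/CMPC.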
693                                      SDValue &AVRcc, SelectionDAG &DAG,
696   EVT VT = LHS.getValueType();
697   bool UseTest = false;
709     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
710       switch (C->getSExtValue()) {
722         LHS = DAG.getConstant(0, DL, VT);
729         RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
742     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
743       switch (C->getSExtValue()) {
748         LHS = DAG.getConstant(0, DL, VT);
772     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
773       RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
786   if (VT == MVT::i32) {
788                                 DAG.getIntPtrConstant(0, DL));
790                                 DAG.getIntPtrConstant(1, DL));
792                                 DAG.getIntPtrConstant(0, DL));
794                                 DAG.getIntPtrConstant(1, DL));
799                                   DAG.getIntPtrConstant(1, DL));
802       Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
805   } else if (VT == MVT::i64) {
807                                 DAG.getIntPtrConstant(0, DL));
809                                 DAG.getIntPtrConstant(1, DL));
812                                 DAG.getIntPtrConstant(0, DL));
814                                 DAG.getIntPtrConstant(1, DL));
816                                 DAG.getIntPtrConstant(0, DL));
818                                 DAG.getIntPtrConstant(1, DL));
821                                 DAG.getIntPtrConstant(0, DL));
823                                 DAG.getIntPtrConstant(1, DL));
826                                 DAG.getIntPtrConstant(0, DL));
828                                 DAG.getIntPtrConstant(1, DL));
830                                 DAG.getIntPtrConstant(0, DL));
832                                 DAG.getIntPtrConstant(1, DL));
837                                   DAG.getIntPtrConstant(1, DL));
840       Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
845   } else if (VT == MVT::i8 || VT == MVT::i16) {
852                       LHS, DAG.getIntPtrConstant(1, DL)));
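// LowerBR_CC: materialize the comparison with getAVRCmp and branch on it with
// the target BRCOND node. LowerSELECT_CC and LowerSETCC below reuse the same
// comparison and feed it into a SELECT_CC-style operand list.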
868 SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
869   SDValue Chain = Op.getOperand(0);
871   SDValue LHS = Op.getOperand(2);
872   SDValue RHS = Op.getOperand(3);
873   SDValue Dest = Op.getOperand(4);
877   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
879   return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
883 SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
884   SDValue LHS = Op.getOperand(0);
885   SDValue RHS = Op.getOperand(1);
886   SDValue TrueV = Op.getOperand(2);
887   SDValue FalseV = Op.getOperand(3);
892   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
894   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
895   SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
900 SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
901   SDValue LHS = Op.getOperand(0);
902   SDValue RHS = Op.getOperand(1);
909   SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
910   SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
911   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
912   SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
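// LowerVASTART: va_start stores the frame index of the first variadic
// argument (recorded by LowerFormalArguments) through the given va_list
// pointer.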
917 SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
918   const MachineFunction &MF = DAG.getMachineFunction();
919   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
920   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
921   auto DL = DAG.getDataLayout();
926   SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
928   return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
929                       MachinePointerInfo(SV));
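// LowerINLINEASM: copy the INLINEASM node's operands and append one extra
// register input referring to the subtarget's fixed zero register, then
// rebuild the node and replace all uses of the original.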
933 SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
949   SmallVector<SDValue, 8> Ops;
950   SDNode *N = Op.getNode();
952   for (unsigned I = 0; I < N->getNumOperands(); I++) {
953     SDValue Operand = N->getOperand(I);
954     if (Operand.getValueType() == MVT::Glue) {
959     Ops.push_back(Operand);
963   Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
964   Ops.push_back(ZeroReg);
971   SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
972   DAG.ReplaceAllUsesOfValueWith(Op, New);
973   DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
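// LowerOperation: dispatch for every opcode marked as Custom in the
// constructor; each case simply forwards to the matching Lower* helper above.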
979   switch (Op.getOpcode()) {
987     return LowerShifts(Op, DAG);
989     return LowerGlobalAddress(Op, DAG);
991     return LowerBlockAddress(Op, DAG);
993     return LowerBR_CC(Op, DAG);
995     return LowerSELECT_CC(Op, DAG);
997     return LowerSETCC(Op, DAG);
999     return LowerVASTART(Op, DAG);
1002     return LowerDivRem(Op, DAG);
1004     return LowerINLINEASM(Op, DAG);
1017   switch (N->getOpcode()) {
1020     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
1079   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1080     VT = LD->getMemoryVT();
1081     Op = LD->getBasePtr().getNode();
1087   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1088     VT = ST->getMemoryVT();
1089     Op = ST->getBasePtr().getNode();
1097   if (VT != MVT::i8 && VT != MVT::i16) {
1106   int RHSC = RHS->getSExtValue();
1110   if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
1114   Base = Op->getOperand(0);
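// getPostIndexedAddressParts: recognize loads/stores whose pointer is
// incremented by the access size afterwards (+1 for i8, +2 for i16) so they
// can use post-increment addressing; i16 accesses are only accepted when the
// device stores the low byte first.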
1135   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1136     VT = LD->getMemoryVT();
1139   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1140     VT = ST->getMemoryVT();
1148     if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
1154   if (VT != MVT::i8 && VT != MVT::i16) {
1163   int RHSC = RHS->getSExtValue();
1166   if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
1172   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
1176   Base = Op->getOperand(0);
1195 #include "AVRGenCallingConv.inc"
1200 AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
1201 AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
1202 AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
1204 AVR::R22, AVR::R21, AVR::R20};
1206 AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
1207 AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
1208 AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
1209 AVR::R11R10, AVR::R10R9, AVR::R9R8};
1211 AVR::R24R23, AVR::R23R22,
1212 AVR::R22R21, AVR::R21R20};
1215 "8-bit and 16-bit register arrays must be of equal length");
1217 "8-bit and 16-bit register arrays must be of equal length");
1223 template <typename ArgT>
1240   unsigned NumArgs = Args.size();
1243   int RegLastIdx = -1;
1245   bool UseStack = false;
1246   for (unsigned i = 0; i != NumArgs;) {
1247     MVT VT = Args[i].VT;
1252     unsigned ArgIndex = Args[i].OrigArgIndex;
1255     for (; j != NumArgs; ++j) {
1256       if (Args[j].OrigArgIndex != ArgIndex)
1258       TotalBytes += Args[j].VT.getStoreSize();
1261     TotalBytes = alignTo(TotalBytes, 2);
1263     if (TotalBytes == 0)
1266     unsigned RegIdx = RegLastIdx + TotalBytes;
1267     RegLastIdx = RegIdx;
1269     if (RegIdx >= RegList8.size()) {
1272     for (; i != j; ++i) {
1273       MVT VT = Args[i].VT;
1283       if (VT == MVT::i8) {
1285       } else if (VT == MVT::i16) {
1289                          "calling convention can only manage i8 and i16 types");
1291       assert(Reg && "register not available in calling convention");
1302 template <typename ArgT>
1305   unsigned TotalBytes = 0;
1307   for (const ArgT &Arg : Args) {
1308     TotalBytes += Arg.VT.getStoreSize();
1316 template <typename ArgT>
1319   unsigned NumArgs = Args.size();
1323     assert(TotalBytes <= 4 &&
1324            "return values greater than 4 bytes cannot be lowered on AVRTiny");
1326     assert(TotalBytes <= 8 &&
1327            "return values greater than 8 bytes cannot be lowered on AVR");
1342   if (TotalBytes > 4) {
1345   TotalBytes = alignTo(TotalBytes, 2);
1349   int RegIdx = TotalBytes - 1;
1350   for (unsigned i = 0; i != NumArgs; ++i) {
1351     MVT VT = Args[i].VT;
1353     if (VT == MVT::i8) {
1355     } else if (VT == MVT::i16) {
1360     assert(Reg && "register not available in calling convention");
1367 SDValue AVRTargetLowering::LowerFormalArguments(
1369     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1370     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1371   MachineFunction &MF = DAG.getMachineFunction();
1372   MachineFrameInfo &MFI = MF.getFrameInfo();
1373   auto DL = DAG.getDataLayout();
1376   SmallVector<CCValAssign, 16> ArgLocs;
1377   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1382     CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
1389   for (CCValAssign &VA : ArgLocs) {
1392     if (VA.isRegLoc()) {
1393       EVT RegVT = VA.getLocVT();
1394       const TargetRegisterClass *RC;
1395       if (RegVT == MVT::i8) {
1396         RC = &AVR::GPR8RegClass;
1397       } else if (RegVT == MVT::i16) {
1398         RC = &AVR::DREGSRegClass;
1404       ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1412       switch (VA.getLocInfo()) {
1418         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1422                                DAG.getValueType(VA.getValVT()));
1423         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1427                                DAG.getValueType(VA.getValVT()));
1428         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1432       InVals.push_back(ArgValue);
1437       EVT LocVT = VA.getLocVT();
1440       int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1441                                      VA.getLocMemOffset(), true);
1446       InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1454     unsigned StackSize = CCInfo.getStackSize();
1455     AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1457     AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
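// LowerCall: analyze the outgoing arguments with the same helper used for
// formal arguments, copy register arguments into place, store stack arguments
// relative to SP (note the getLocMemOffset() + 1 below, since SP points one
// byte below the last pushed value on AVR), and emit the call with the
// calling convention's register-preservation mask.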
1467 SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1468                                      SmallVectorImpl<SDValue> &InVals) const {
1469   SelectionDAG &DAG = CLI.DAG;
1471   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1472   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1473   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1474   SDValue Chain = CLI.Chain;
1475   SDValue Callee = CLI.Callee;
1476   bool &isTailCall = CLI.IsTailCall;
1478   bool isVarArg = CLI.IsVarArg;
1480   MachineFunction &MF = DAG.getMachineFunction();
1486   SmallVector<CCValAssign, 16> ArgLocs;
1487   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1494   if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1495     const GlobalValue *GV = G->getGlobal();
1496     if (isa<Function>(GV))
1497       F = cast<Function>(GV);
1499         DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
1500   } else if (const ExternalSymbolSDNode *ES =
1501                  dyn_cast<ExternalSymbolSDNode>(Callee)) {
1502     Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1508     CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1515   unsigned NumBytes = CCInfo.getStackSize();
1517   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1519   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1523   bool HasStackArgs = false;
1524   for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1525     CCValAssign &VA = ArgLocs[AI];
1526     EVT RegVT = VA.getLocVT();
1527     SDValue Arg = OutVals[AI];
1530     switch (VA.getLocInfo()) {
1551     if (VA.isMemLoc()) {
1552       HasStackArgs = true;
1558     RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1568   SmallVector<SDValue, 8> MemOpChains;
1569   for (; AI != AE; AI++) {
1570     CCValAssign &VA = ArgLocs[AI];
1571     SDValue Arg = OutVals[AI];
1576     SDValue PtrOff = DAG.getNode(
1578         DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
1579         DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
1581     MemOpChains.push_back(
1582         DAG.getStore(Chain, DL, Arg, PtrOff,
1586   if (!MemOpChains.empty())
1594   for (auto Reg : RegsToPass) {
1595     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);
1596     InGlue = Chain.getValue(1);
1600   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1601   SmallVector<SDValue, 8> Ops;
1602   Ops.push_back(Chain);
1603   Ops.push_back(Callee);
1607   for (auto Reg : RegsToPass) {
1608     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1618       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1619   assert(Mask && "Missing call preserved mask for calling convention");
1620   Ops.push_back(DAG.getRegisterMask(Mask));
1622   if (InGlue.getNode()) {
1623     Ops.push_back(InGlue);
1627   InGlue = Chain.getValue(1);
1630   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);
1633   InGlue = Chain.getValue(1);
1638   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,
1645 SDValue AVRTargetLowering::LowerCallResult(
1646     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
1647     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1648     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1651   SmallVector<CCValAssign, 16> RVLocs;
1652   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1657     CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1663   for (CCValAssign const &RVLoc : RVLocs) {
1664     Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1667     InGlue = Chain.getValue(2);
1668     InVals.push_back(Chain.getValue(0));
1678 bool AVRTargetLowering::CanLowerReturn(
1680     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1682   SmallVector<CCValAssign, 16> RVLocs;
1683   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1684   return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
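// LowerReturn: copy return values into the conventional return registers and
// finish with RET_GLUE, or RETI_GLUE when returning from an interrupt or
// signal handler; naked functions get no return instruction at all.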
1692 AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1694                                const SmallVectorImpl<ISD::OutputArg> &Outs,
1695                                const SmallVectorImpl<SDValue> &OutVals,
1696                                const SDLoc &dl, SelectionDAG &DAG) const {
1698   SmallVector<CCValAssign, 16> RVLocs;
1701   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1704   MachineFunction &MF = DAG.getMachineFunction();
1708     CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1714   SmallVector<SDValue, 4> RetOps(1, Chain);
1716   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1717     CCValAssign &VA = RVLocs[i];
1718     assert(VA.isRegLoc() && "Can only return in registers!");
1720     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1723     Glue = Chain.getValue(1);
1724     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1729   if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
1733   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1735   if (!AFI->isInterruptOrSignalHandler()) {
1748   if (Glue.getNode()) {
1749     RetOps.push_back(Glue);
1752   return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
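// insertShift: expand the shift-by-a-variable-amount pseudos into a small
// loop CFG: a check block decrements the shift count (DECRd) and holds the
// PHIs, a loop block performs the one-bit shift, and a remainder block
// receives the final value.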
1759 MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1760                                                   MachineBasicBlock *BB,
1763   const TargetRegisterClass *RC;
1764   bool HasRepeatedOperand = false;
1765   MachineFunction *F = BB->getParent();
1766   MachineRegisterInfo &RI = F->getRegInfo();
1770   switch (MI.getOpcode()) {
1775     RC = &AVR::GPR8RegClass;
1776     HasRepeatedOperand = true;
1780     RC = &AVR::DREGSRegClass;
1784     RC = &AVR::GPR8RegClass;
1788     RC = &AVR::DREGSRegClass;
1792     RC = &AVR::GPR8RegClass;
1796     RC = &AVR::DREGSRegClass;
1799     Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
1800     RC = &AVR::GPR8RegClass;
1804     RC = &AVR::DREGSRegClass;
1808     RC = &AVR::GPR8RegClass;
1812     RC = &AVR::DREGSRegClass;
1816   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1819   for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
1825   MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
1826   MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
1827   MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
1829   F->insert(I, LoopBB);
1830   F->insert(I, CheckBB);
1831   F->insert(I, RemBB);
1837   RemBB->transferSuccessorsAndUpdatePHIs(BB);
1840   BB->addSuccessor(CheckBB);
1841   LoopBB->addSuccessor(CheckBB);
1842   CheckBB->addSuccessor(LoopBB);
1843   CheckBB->addSuccessor(RemBB);
1845   Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
1846   Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
1847   Register ShiftReg = RI.createVirtualRegister(RC);
1848   Register ShiftReg2 = RI.createVirtualRegister(RC);
1849   Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
1859   auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
1860   if (HasRepeatedOperand)
1861     ShiftMI.addReg(ShiftReg);
1869   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
1874   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
1879   BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
1885   BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
1888   MI.eraseFromParent();
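// insertMultibyteShift: shift a value spread across several 8-bit registers
// (Regs) by a constant amount. Whole bytes are handled by renaming registers,
// amounts of 4..7 bits within a byte use the SWAP/ANDI nibble trick, and
// whatever remains is shifted one bit at a time.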
1911   const bool ShiftLeft = Opc == ISD::SHL;
1912   const bool ArithmeticShift = Opc == ISD::SRA;
1915   Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1923   if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1928     size_t ShiftRegsOffset = ShiftAmt / 8;
1929     size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1931         Regs.slice(ShiftRegsOffset, ShiftRegsSize);
1939     Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1943     if (ShiftAmt % 8 == 6) {
1945       Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1947       LowByte = NewLowByte;
1951     for (size_t I = 0; I < Regs.size(); I++) {
1952       int ShiftRegsIdx = I + 1;
1953       if (ShiftRegsIdx < (int)ShiftRegs.size()) {
1954         Regs[I] = ShiftRegs[ShiftRegsIdx];
1955       } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
1956         Regs[I] = std::pair(LowByte, 0);
1958         Regs[I] = std::pair(ZeroReg, 0);
1966   if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1969     size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1971         Regs.slice(0, ShiftRegsSize);
1980     Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1982     if (ArithmeticShift) {
2000     if (ShiftAmt % 8 == 6) {
2003       Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2011     for (int I = Regs.size() - 1; I >= 0; I--) {
2012       int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
2013       if (ShiftRegsIdx >= 0) {
2014         Regs[I] = ShiftRegs[ShiftRegsIdx];
2015       } else if (ShiftRegsIdx == -1) {
2016         Regs[I] = std::pair(HighByte, 0);
2018         Regs[I] = std::pair(ExtByte, 0);
2027   while (ShiftLeft && ShiftAmt >= 8) {
2029     for (size_t I = 0; I < Regs.size() - 1; I++) {
2030       Regs[I] = Regs[I + 1];
2034     Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
2037     Regs = Regs.drop_back(1);
2044   if (!ShiftLeft && ShiftAmt >= 8) {
2045     if (ArithmeticShift) {
2047       ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2048       Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2050           .addReg(Regs[0].first, 0, Regs[0].second)
2051           .addReg(Regs[0].first, 0, Regs[0].second);
2052       BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
2056       ShrExtendReg = ZeroReg;
2058     for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2060       for (size_t I = Regs.size() - 1; I != 0; I--) {
2061         Regs[I] = Regs[I - 1];
2065       Regs[0] = std::pair(ShrExtendReg, 0);
2068       Regs = Regs.drop_front(1);
2073   assert((ShiftAmt < 8) && "Unexpect shift amount");
2093   if (!ArithmeticShift && ShiftAmt >= 4) {
2095     for (size_t I = 0; I < Regs.size(); I++) {
2096       size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
2097       Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2101         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2107       Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2110           .addImm(ShiftLeft ? 0xf0 : 0x0f);
2112         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2116         size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2117         Regs[PrevIdx] = std::pair(R, 0);
2120       Regs[Idx] = std::pair(AndReg, 0);
2127   while (ShiftLeft && ShiftAmt) {
2129     for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
2130       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2133       if (I == (ssize_t)Regs.size() - 1) {
2136             .addReg(In, 0, InSubreg);
2140             .addReg(In, 0, InSubreg);
2142       Regs[I] = std::pair(Out, 0);
2146   while (!ShiftLeft && ShiftAmt) {
2148     for (size_t I = 0; I < Regs.size(); I++) {
2149       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2153       unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2158       Regs[I] = std::pair(Out, 0);
2163   if (ShiftAmt != 0) {
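// insertWideShift: lower the 32-bit shift pseudos by handing the four 8-bit
// sub-registers of the two 16-bit operands to insertMultibyteShift and then
// reassembling the halves with REG_SEQUENCE.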
2170 AVRTargetLowering::insertWideShift(MachineInstr &MI,
2171                                    MachineBasicBlock *BB) const {
2173   const DebugLoc &dl = MI.getDebugLoc();
2177   int64_t ShiftAmt = MI.getOperand(4).getImm();
2179   switch (MI.getOpcode()) {
2192   std::array<std::pair<Register, int>, 4> Registers = {
2193       std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
2194       std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
2195       std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
2196       std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
2214       (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2216     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2221     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2228     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2233     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2241   MI.eraseFromParent();
2246   if (I->getOpcode() == AVR::COPY) {
2247     Register SrcReg = I->getOperand(1).getReg();
2248     return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
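// insertMul: emit the hardware multiply and then clear R1 again with EOR,
// because MUL leaves half of its product in the register that the avr-gcc ABI
// expects to stay zero.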
2257 MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
2258                                                 MachineBasicBlock *BB) const {
2266   BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
2274 AVRTargetLowering::insertCopyZero(MachineInstr &MI,
2275                                   MachineBasicBlock *BB) const {
2279       .add(MI.getOperand(0))
2281   MI.eraseFromParent();
2287 MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
2288     MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
2289   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2303   const TargetRegisterClass *RC =
2304       (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2305   unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2306   unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2314   BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
2315       .add(MI.getOperand(1));
2321       .add(MI.getOperand(2));
2325       .add(MI.getOperand(1))
2334   MI.eraseFromParent();
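// EmitInstrWithCustomInserter: route the pseudo-instructions that need custom
// MachineInstr expansion (variable shifts, wide shifts, multiplies, zero
// copies, the atomic read-modify-write pseudos, and Select8/Select16) to the
// insert* helpers above.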
2341   int Opc = MI.getOpcode();
2357     return insertShift(MI, MBB, STI.hasTinyEncoding());
2361     return insertWideShift(MI, MBB);
2364     return insertMul(MI, MBB);
2366     return insertCopyZero(MI, MBB);
2367   case AVR::AtomicLoadAdd8:
2368     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
2369   case AVR::AtomicLoadAdd16:
2370     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
2371   case AVR::AtomicLoadSub8:
2372     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
2373   case AVR::AtomicLoadSub16:
2374     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
2375   case AVR::AtomicLoadAnd8:
2376     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
2377   case AVR::AtomicLoadAnd16:
2378     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
2379   case AVR::AtomicLoadOr8:
2380     return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
2381   case AVR::AtomicLoadOr16:
2382     return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
2383   case AVR::AtomicLoadXor8:
2384     return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
2385   case AVR::AtomicLoadXor16:
2386     return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
2389   assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
2390          "Unexpected instr type to insert");
2411   if (FallThrough != nullptr) {
2427   unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
2441   MBB->addSuccessor(falseMBB);
2442   MBB->addSuccessor(trueMBB);
2450           MI.getOperand(0).getReg())
2456   MI.eraseFromParent();
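// Inline-assembly constraint support: getConstraintType classifies the
// single-letter constraints and getSingleConstraintMatchWeight scores an
// operand against one, accepting the usual avr-gcc immediate ranges (for
// example 'I' is 0..63, 'J' is -63..0, 'M' is any 8-bit value).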
2466   if (Constraint.size() == 1) {
2468     switch (Constraint[0]) {
2511   switch (ConstraintCode[0]) {
2522   Value *CallOperandVal = info.CallOperandVal;
2527   if (!CallOperandVal) {
2532   switch (*constraint) {
2556     if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
2563     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2564       if (isUInt<6>(C->getZExtValue())) {
2570     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2571       if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
2577     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2578       if (C->getZExtValue() == 2) {
2584     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2585       if (C->getZExtValue() == 0) {
2591     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2592       if (isUInt<8>(C->getZExtValue())) {
2598     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2599       if (C->getSExtValue() == -1) {
2605     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2606       if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
2607           (C->getZExtValue() == 24)) {
2613     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2614       if (C->getZExtValue() == 1) {
2620     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2621       if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
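// getRegForInlineAsmConstraint: map register-class constraint letters to AVR
// register classes, choosing the 8-bit or 16-bit (register-pair) class from
// the requested value type; 'x', 'y' and 'z' resolve to the fixed pointer
// register pairs R27:R26, R29:R28 and R31:R30.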
2634 std::pair<unsigned, const TargetRegisterClass *>
2638   if (Constraint.size() == 1) {
2639     switch (Constraint[0]) {
2642         return std::make_pair(0U, &AVR::LD8loRegClass);
2643       else if (VT == MVT::i16)
2644         return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
2647       if (VT == MVT::i8 || VT == MVT::i16)
2648         return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
2652         return std::make_pair(0U, &AVR::LD8RegClass);
2653       else if (VT == MVT::i16)
2654         return std::make_pair(0U, &AVR::DLDREGSRegClass);
2658         return std::make_pair(0U, &AVR::GPR8loRegClass);
2659       else if (VT == MVT::i16)
2660         return std::make_pair(0U, &AVR::DREGSloRegClass);
2663       if (VT == MVT::i8 || VT == MVT::i16)
2664         return std::make_pair(0U, &AVR::PTRREGSRegClass);
2667       return std::make_pair(0U, &AVR::GPRSPRegClass);
2670         return std::make_pair(0U, &AVR::GPR8RegClass);
2671       else if (VT == MVT::i16)
2672         return std::make_pair(0U, &AVR::DREGSRegClass);
2677                             &AVR::GPR8RegClass);
2680       if (VT == MVT::i8 || VT == MVT::i16)
2681         return std::make_pair(0U, &AVR::IWREGSRegClass);
2685       if (VT == MVT::i8 || VT == MVT::i16)
2686         return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
2690       if (VT == MVT::i8 || VT == MVT::i16)
2691         return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
2695       if (VT == MVT::i8 || VT == MVT::i16)
2696         return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
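// LowerAsmOperandForConstraint: range-check constant operands against the
// same immediate constraints and, when they fit, push them as target
// constants; out-of-range values are simply not added to Ops.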
2709                                                      std::vector<SDValue> &Ops,
2713   EVT Ty = Op.getValueType();
2716   if (Constraint.size() != 1) {
2720   char ConstraintLetter = Constraint[0];
2721   switch (ConstraintLetter) {
2739     int64_t CVal64 = C->getSExtValue();
2741     switch (ConstraintLetter) {
2743       if (!isUInt<6>(CUVal64))
2748       if (CVal64 < -63 || CVal64 > 0)
2763       if (!isUInt<8>(CUVal64))
2779       if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2789       if (CVal64 < -6 || CVal64 > 5)
2799     if (!FC || !FC->isZero())
2806   if (Result.getNode()) {
2807     Ops.push_back(Result);
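// getRegisterByName: resolve textual register names via StringSwitch; the
// first table maps 8-bit names to single registers and the second maps names
// such as "r0" and "sp" to 16-bit register pairs (presumably for the
// llvm.read_register / llvm.write_register intrinsics).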
2820               .Case("r0", AVR::R0)
2821               .Case("r1", AVR::R1)
2825               .Case("r0", AVR::R1R0)
2826               .Case("sp", AVR::SP)