178   if (!Subtarget.supportsMultiplication()) {
283   EVT VT = Op.getValueType();
286          "Expected power-of-2 shift amount");
289   if (!isa<ConstantSDNode>(N->getOperand(1))) {
294     SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
302         cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
303     if (ShiftAmount == 16) {
308       switch (Op.getOpcode()) {
321       switch (Op.getOpcode()) {
334     SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
340   if (!isa<ConstantSDNode>(N->getOperand(1))) {
341     switch (Op.getOpcode()) {
351       SDValue Amt = N->getOperand(1);
352       EVT AmtVT = Amt.getValueType();
358       SDValue Amt = N->getOperand(1);
359       EVT AmtVT = Amt.getValueType();
370   uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
371   SDValue Victim = N->getOperand(0);
373   switch (Op.getOpcode()) {
397   if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
403   } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
410   } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
415   } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
420   } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
425   } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
430   } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
436   } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
442   } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
447   } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
461   switch (ShiftAmount) {
480   if (4 <= ShiftAmount && ShiftAmount < 8)
481     switch (Op.getOpcode()) {
495   else if (8 <= ShiftAmount && ShiftAmount < 12)
496     switch (Op.getOpcode()) {
521   else if (12 <= ShiftAmount)
522     switch (Op.getOpcode()) {
549   while (ShiftAmount--) {
550     Victim = DAG.getNode(Opc8, dl, VT, Victim);
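// The excerpted lines above belong to LowerShifts(): wide and variable-amount
// shifts are turned into AVRISD pseudo nodes or loops of single-bit shift
// nodes, since AVR can only shift one bit per instruction. The next routine,
// LowerDivRem(), handles ISD::SDIVREM/UDIVREM; AVR has no divide instruction,
// so the node is expanded into one runtime-library call
// (RTLIB::[SU]DIVREM_I8/I16/I32) that yields quotient and remainder together,
// with the lines omitted from this excerpt presumably filling in the usual
// setLibCallee()/LowerCallTo() boilerplate.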
556 SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
557   unsigned Opcode = Op->getOpcode();
559          "Invalid opcode for Div/Rem lowering");
561   EVT VT = Op->getValueType(0);
562   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
565   switch (VT.getSimpleVT().SimpleTy) {
569     LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
572     LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
575     LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
579   SDValue InChain = DAG.getEntryNode();
582   TargetLowering::ArgListEntry Entry;
583   for (SDValue const &Value : Op->op_values()) {
585     Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
586     Entry.IsSExt = IsSigned;
587     Entry.IsZExt = !IsSigned;
588     Args.push_back(Entry);
597   TargetLowering::CallLoweringInfo CLI(DAG);
602       .setSExtResult(IsSigned)
603       .setZExtResult(!IsSigned);
605   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
606   return CallInfo.first;
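// LowerGlobalAddress()/LowerBlockAddress(): the symbol and its offset are
// re-emitted as target nodes and, in the lines not shown here, wrapped so that
// instruction selection can fold them into absolute addressing (an
// AVRISD::WRAPPER node, per the wrapper description for this backend).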
609 SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
610                                               SelectionDAG &DAG) const {
611   auto DL = DAG.getDataLayout();
613   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
614   int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
622 SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
623                                              SelectionDAG &DAG) const {
624   auto DL = DAG.getDataLayout();
625   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
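// getAVRCmp(): builds the glue-producing compare chain. A 16-bit compare
// against a constant is split into its low and high bytes; the low byte is
// compared first and the high byte uses AVRISD::CMPC (compare with carry),
// as line 680 shows.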
653 SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
654                                      SelectionDAG &DAG, SDLoc DL) const {
655   assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
656          "LHS and RHS have different types");
657   assert(((LHS.getSimpleValueType() == MVT::i16) ||
658           (LHS.getSimpleValueType() == MVT::i8)) &&
659          "invalid comparison type");
663   if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
664     uint64_t Imm = cast<ConstantSDNode>(RHS)->getZExtValue();
668                                 DAG.getIntPtrConstant(0, DL));
670                                 DAG.getIntPtrConstant(1, DL));
671     SDValue RHSlo = (Imm & 0xff) == 0
674                                       DAG.getIntPtrConstant(0, DL));
675     SDValue RHShi = (Imm & 0xff00) == 0
678                                       DAG.getIntPtrConstant(1, DL));
680     Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
681   } else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
685     SDValue LHSlo = (Imm & 0xff) == 0
688                                       DAG.getIntPtrConstant(0, DL));
689     SDValue LHShi = (Imm & 0xff00) == 0
692                                       DAG.getIntPtrConstant(1, DL));
694                                 DAG.getIntPtrConstant(0, DL));
696                                 DAG.getIntPtrConstant(1, DL));
710                                      SDValue &AVRcc, SelectionDAG &DAG,
713   EVT VT = LHS.getValueType();
714   bool UseTest = false;
726     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
727       switch (C->getSExtValue()) {
739         LHS = DAG.getConstant(0, DL, VT);
746         RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
759     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
760       switch (C->getSExtValue()) {
765         LHS = DAG.getConstant(0, DL, VT);
789     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
790       RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
803   if (VT == MVT::i32) {
805                                  DAG.getIntPtrConstant(0, DL));
807                                  DAG.getIntPtrConstant(1, DL));
809                                  DAG.getIntPtrConstant(0, DL));
811                                  DAG.getIntPtrConstant(1, DL));
816                                    DAG.getIntPtrConstant(1, DL));
819       Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
822   } else if (VT == MVT::i64) {
824                                DAG.getIntPtrConstant(0, DL));
826                                DAG.getIntPtrConstant(1, DL));
829                                  DAG.getIntPtrConstant(0, DL));
831                                  DAG.getIntPtrConstant(1, DL));
833                                  DAG.getIntPtrConstant(0, DL));
835                                  DAG.getIntPtrConstant(1, DL));
838                                DAG.getIntPtrConstant(0, DL));
840                                DAG.getIntPtrConstant(1, DL));
843                                  DAG.getIntPtrConstant(0, DL));
845                                  DAG.getIntPtrConstant(1, DL));
847                                  DAG.getIntPtrConstant(0, DL));
849                                  DAG.getIntPtrConstant(1, DL));
854                                      DAG.getIntPtrConstant(1, DL));
857       Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
862   } else if (VT == MVT::i8 || VT == MVT::i16) {
869                         LHS, DAG.getIntPtrConstant(1, DL)));
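// The overload above (line 710 onward) also receives the ISD condition code:
// it rewrites conditions AVR cannot branch on directly (for example by adding
// one to a constant operand, lines 746 and 790, or by switching to a TST
// against zero) and splits i32/i64 operands into smaller compares chained
// through the carry. LowerBR_CC/LowerSELECT_CC/LowerSETCC below all funnel
// their comparison through it and glue the result into AVRISD::BRCOND or
// select nodes.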
885 SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
886   SDValue Chain = Op.getOperand(0);
888   SDValue LHS = Op.getOperand(2);
889   SDValue RHS = Op.getOperand(3);
890   SDValue Dest = Op.getOperand(4);
894   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
896   return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
900 SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
901   SDValue LHS = Op.getOperand(0);
902   SDValue RHS = Op.getOperand(1);
903   SDValue TrueV = Op.getOperand(2);
904   SDValue FalseV = Op.getOperand(3);
909   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
911   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
912   SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
917 SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
918   SDValue LHS = Op.getOperand(0);
919   SDValue RHS = Op.getOperand(1);
926   SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
927   SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
928   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
929   SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
934 SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
935   const MachineFunction &MF = DAG.getMachineFunction();
936   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
937   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
938   auto DL = DAG.getDataLayout();
943   SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
945   return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
946                       MachinePointerInfo(SV));
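// LowerINLINEASM(): the node's operands are copied and an extra register
// operand for the zero register is appended (lines 980-981) before the
// INLINEASM node is rebuilt and RAUW'd over the original; this appears to be
// how the backend keeps the zero register correctly modelled across inline
// assembly.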
950 SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
966   SmallVector<SDValue, 8> Ops;
967   SDNode *N = Op.getNode();
969   for (unsigned I = 0; I < N->getNumOperands(); I++) {
970     SDValue Operand = N->getOperand(I);
971     if (Operand.getValueType() == MVT::Glue) {
976     Ops.push_back(Operand);
980   Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
981   Ops.push_back(ZeroReg);
988   SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
989   DAG.ReplaceAllUsesOfValueWith(Op, New);
990   DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
996   switch (Op.getOpcode()) {
1004    return LowerShifts(Op, DAG);
1006    return LowerGlobalAddress(Op, DAG);
1008    return LowerBlockAddress(Op, DAG);
1010    return LowerBR_CC(Op, DAG);
1012    return LowerSELECT_CC(Op, DAG);
1014    return LowerSETCC(Op, DAG);
1016    return LowerVASTART(Op, DAG);
1019    return LowerDivRem(Op, DAG);
1021    return LowerINLINEASM(Op, DAG);
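// The next excerpts cover ReplaceNodeResults() and the pre-/post-indexed
// address helpers: a load or store may be combined with its address update
// only when the constant offset matches the access width (-1/-2 for
// pre-decrement, +1/+2 for post-increment, see lines 1127 and 1183).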
1034  switch (N->getOpcode()) {
1037    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
1096  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1097    VT = LD->getMemoryVT();
1098    Op = LD->getBasePtr().getNode();
1104  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1105    VT = ST->getMemoryVT();
1106    Op = ST->getBasePtr().getNode();
1114  if (VT != MVT::i8 && VT != MVT::i16) {
1123  int RHSC = RHS->getSExtValue();
1127  if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
1131  Base = Op->getOperand(0);
1152  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1153    VT = LD->getMemoryVT();
1156  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1157    VT = ST->getMemoryVT();
1165  if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
1171  if (VT != MVT::i8 && VT != MVT::i16) {
1180  int RHSC = RHS->getSExtValue();
1183  if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
1189  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
1193  Base = Op->getOperand(0);
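// Calling-convention support follows. The register arrays below list the
// argument registers in reverse order, as required by the AVR ABI (the
// highest free register pair is handed out first); shorter lists are used
// for AVRTiny devices.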
1212 #include "AVRGenCallingConv.inc"
1217     AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
1218     AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
1219     AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
1221     AVR::R22, AVR::R21, AVR::R20};
1223     AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
1224     AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
1225     AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
1226     AVR::R11R10, AVR::R10R9, AVR::R9R8};
1228     AVR::R24R23, AVR::R23R22,
1229     AVR::R22R21, AVR::R21R20};
1232               "8-bit and 16-bit register arrays must be of equal length");
1234               "8-bit and 16-bit register arrays must be of equal length");
1240 template <typename ArgT>
1257   unsigned NumArgs = Args.size();
1260   int RegLastIdx = -1;
1262   bool UseStack = false;
1263   for (unsigned i = 0; i != NumArgs;) {
1264     MVT VT = Args[i].VT;
1269     unsigned ArgIndex = Args[i].OrigArgIndex;
1272     for (; j != NumArgs; ++j) {
1273       if (Args[j].OrigArgIndex != ArgIndex)
1275       TotalBytes += Args[j].VT.getStoreSize();
1278     TotalBytes = alignTo(TotalBytes, 2);
1280     if (TotalBytes == 0)
1283     unsigned RegIdx = RegLastIdx + TotalBytes;
1284     RegLastIdx = RegIdx;
1286     if (RegIdx >= RegList8.size()) {
1289     for (; i != j; ++i) {
1290       MVT VT = Args[i].VT;
1300       if (VT == MVT::i8) {
1302       } else if (VT == MVT::i16) {
1306                            "calling convention can only manage i8 and i16 types");
1308       assert(Reg && "register not available in calling convention");
1319 template <typename ArgT>
1322   unsigned TotalBytes = 0;
1324   for (const ArgT &Arg : Args) {
1325     TotalBytes += Arg.VT.getStoreSize();
1333 template <typename ArgT>
1336   unsigned NumArgs = Args.size();
1340     assert(TotalBytes <= 4 &&
1341            "return values greater than 4 bytes cannot be lowered on AVRTiny");
1343     assert(TotalBytes <= 8 &&
1344            "return values greater than 8 bytes cannot be lowered on AVR");
1359   if (TotalBytes > 4) {
1362   TotalBytes = alignTo(TotalBytes, 2);
1366   int RegIdx = TotalBytes - 1;
1367   for (unsigned i = 0; i != NumArgs; ++i) {
1368     MVT VT = Args[i].VT;
1370     if (VT == MVT::i8) {
1372     } else if (VT == MVT::i16) {
1377     assert(Reg && "register not available in calling convention");
1384 SDValue AVRTargetLowering::LowerFormalArguments(
1386     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1387     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1388   MachineFunction &MF = DAG.getMachineFunction();
1389   MachineFrameInfo &MFI = MF.getFrameInfo();
1390   auto DL = DAG.getDataLayout();
1393   SmallVector<CCValAssign, 16> ArgLocs;
1394   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1399   CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
1406   for (CCValAssign &VA : ArgLocs) {
1409     if (VA.isRegLoc()) {
1410       EVT RegVT = VA.getLocVT();
1411       const TargetRegisterClass *RC;
1412       if (RegVT == MVT::i8) {
1413         RC = &AVR::GPR8RegClass;
1414       } else if (RegVT == MVT::i16) {
1415         RC = &AVR::DREGSRegClass;
1421       ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1429       switch (VA.getLocInfo()) {
1435         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1439                                DAG.getValueType(VA.getValVT()));
1440         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1444                                DAG.getValueType(VA.getValVT()));
1445         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1449       InVals.push_back(ArgValue);
1454       EVT LocVT = VA.getLocVT();
1457       int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1458                                      VA.getLocMemOffset(), true);
1463       InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1471   unsigned StackSize = CCInfo.getStackSize();
1472   AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1474   AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
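// LowerCall(): after AnalyzeCallOperands, register arguments are glued in with
// CopyToReg while stack arguments are stored relative to SP with an offset of
// locOffset + 1 (line 1596), since the AVR stack pointer points one byte below
// the top of stack. The call node carries the register operands plus the
// call-preserved mask, is bracketed by CALLSEQ_START/CALLSEQ_END, and the
// results are picked up by LowerCallResult().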
1484 SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1485                                      SmallVectorImpl<SDValue> &InVals) const {
1486   SelectionDAG &DAG = CLI.DAG;
1488   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1489   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1490   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1491   SDValue Chain = CLI.Chain;
1492   SDValue Callee = CLI.Callee;
1493   bool &isTailCall = CLI.IsTailCall;
1495   bool isVarArg = CLI.IsVarArg;
1497   MachineFunction &MF = DAG.getMachineFunction();
1503   SmallVector<CCValAssign, 16> ArgLocs;
1504   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1511   if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1512     const GlobalValue *GV = G->getGlobal();
1513     if (isa<Function>(GV))
1514       F = cast<Function>(GV);
1516         DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
1517   } else if (const ExternalSymbolSDNode *ES =
1518                  dyn_cast<ExternalSymbolSDNode>(Callee)) {
1519     Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1525   CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1532   unsigned NumBytes = CCInfo.getStackSize();
1534   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1536   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1540   bool HasStackArgs = false;
1541   for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1542     CCValAssign &VA = ArgLocs[AI];
1543     EVT RegVT = VA.getLocVT();
1544     SDValue Arg = OutVals[AI];
1547     switch (VA.getLocInfo()) {
1568     if (VA.isMemLoc()) {
1569       HasStackArgs = true;
1575     RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1585   SmallVector<SDValue, 8> MemOpChains;
1586   for (; AI != AE; AI++) {
1587     CCValAssign &VA = ArgLocs[AI];
1588     SDValue Arg = OutVals[AI];
1593     SDValue PtrOff = DAG.getNode(
1595         DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
1596         DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
1598     MemOpChains.push_back(
1599         DAG.getStore(Chain, DL, Arg, PtrOff,
1603   if (!MemOpChains.empty())
1611   for (auto Reg : RegsToPass) {
1612     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);
1613     InGlue = Chain.getValue(1);
1617   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1618   SmallVector<SDValue, 8> Ops;
1619   Ops.push_back(Chain);
1620   Ops.push_back(Callee);
1624   for (auto Reg : RegsToPass) {
1625     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1635       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1636   assert(Mask && "Missing call preserved mask for calling convention");
1637   Ops.push_back(DAG.getRegisterMask(Mask));
1639   if (InGlue.getNode()) {
1640     Ops.push_back(InGlue);
1644   InGlue = Chain.getValue(1);
1647   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);
1650   InGlue = Chain.getValue(1);
1655   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,
1662 SDValue AVRTargetLowering::LowerCallResult(
1663     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
1664     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1665     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1668   SmallVector<CCValAssign, 16> RVLocs;
1669   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1674   CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1680   for (CCValAssign const &RVLoc : RVLocs) {
1681     Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1684     InGlue = Chain.getValue(2);
1685     InVals.push_back(Chain.getValue(0));
1695 bool AVRTargetLowering::CanLowerReturn(
1697     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1699   SmallVector<CCValAssign, 16> RVLocs;
1700   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1701   return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
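// LowerReturn(): return values are copied into their assigned registers and
// the function ends in a RET_GLUE node, or RETI_GLUE when the function is an
// interrupt or signal handler (line 1752); for naked functions (line 1746)
// the chain appears to be returned without emitting a return at all.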
1709 AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1711                                const SmallVectorImpl<ISD::OutputArg> &Outs,
1712                                const SmallVectorImpl<SDValue> &OutVals,
1713                                const SDLoc &dl, SelectionDAG &DAG) const {
1715   SmallVector<CCValAssign, 16> RVLocs;
1718   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1721   MachineFunction &MF = DAG.getMachineFunction();
1725   CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1731   SmallVector<SDValue, 4> RetOps(1, Chain);
1733   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1734     CCValAssign &VA = RVLocs[i];
1735     assert(VA.isRegLoc() && "Can only return in registers!");
1737     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1740     Glue = Chain.getValue(1);
1741     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1746   if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
1750   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1752   if (!AFI->isInterruptOrSignalHandler()) {
1765   if (Glue.getNode()) {
1766     RetOps.push_back(Glue);
1769   return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
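// insertShift(): custom inserter for shifts by a run-time amount. It splits
// the block and builds a small loop (LoopBB/CheckBB/RemBB, lines 1842-1860)
// that shifts one bit per iteration while DECRd counts the shift amount down,
// with PHIs in CheckBB carrying the partial result.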
1776 MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1777                                                   MachineBasicBlock *BB,
1780   const TargetRegisterClass *RC;
1781   bool HasRepeatedOperand = false;
1782   MachineFunction *F = BB->getParent();
1783   MachineRegisterInfo &RI = F->getRegInfo();
1787   switch (MI.getOpcode()) {
1792     RC = &AVR::GPR8RegClass;
1793     HasRepeatedOperand = true;
1797     RC = &AVR::DREGSRegClass;
1801     RC = &AVR::GPR8RegClass;
1805     RC = &AVR::DREGSRegClass;
1809     RC = &AVR::GPR8RegClass;
1813     RC = &AVR::DREGSRegClass;
1816     Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
1817     RC = &AVR::GPR8RegClass;
1821     RC = &AVR::DREGSRegClass;
1825     RC = &AVR::GPR8RegClass;
1829     RC = &AVR::DREGSRegClass;
1833   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1836   for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
1842   MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
1843   MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
1844   MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
1846   F->insert(I, LoopBB);
1847   F->insert(I, CheckBB);
1848   F->insert(I, RemBB);
1854   RemBB->transferSuccessorsAndUpdatePHIs(BB);
1857   BB->addSuccessor(CheckBB);
1858   LoopBB->addSuccessor(CheckBB);
1859   CheckBB->addSuccessor(LoopBB);
1860   CheckBB->addSuccessor(RemBB);
1862   Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
1863   Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
1864   Register ShiftReg = RI.createVirtualRegister(RC);
1865   Register ShiftReg2 = RI.createVirtualRegister(RC);
1866   Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
1876   auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
1877   if (HasRepeatedOperand)
1878     ShiftMI.addReg(ShiftReg);
1886   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
1891   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
1896   BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
1902   BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
1905   MI.eraseFromParent();
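// insertMultibyteShift(): constant-amount shifts over a value held in several
// 8-bit registers. Whole bytes are moved first (filling with the zero or
// sign-extension byte), shifts of four or more remaining bits use the
// swap-nibble plus ANDI 0xf0/0x0f trick (lines 2110-2137), and whatever is
// left is done with single-bit shift/rotate steps.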
1928   const bool ShiftLeft = Opc == ISD::SHL;
1929   const bool ArithmeticShift = Opc == ISD::SRA;
1932   Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1940   if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1945     size_t ShiftRegsOffset = ShiftAmt / 8;
1946     size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1948         Regs.slice(ShiftRegsOffset, ShiftRegsSize);
1956     Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1960     if (ShiftAmt % 8 == 6) {
1962       Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1964       LowByte = NewLowByte;
1968     for (size_t I = 0; I < Regs.size(); I++) {
1969       int ShiftRegsIdx = I + 1;
1970       if (ShiftRegsIdx < (int)ShiftRegs.size()) {
1971         Regs[I] = ShiftRegs[ShiftRegsIdx];
1972       } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
1973         Regs[I] = std::pair(LowByte, 0);
1975         Regs[I] = std::pair(ZeroReg, 0);
1983   if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1986     size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1988         Regs.slice(0, ShiftRegsSize);
1997     Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1999     if (ArithmeticShift) {
2017     if (ShiftAmt % 8 == 6) {
2020       Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2028     for (int I = Regs.size() - 1; I >= 0; I--) {
2029       int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
2030       if (ShiftRegsIdx >= 0) {
2031         Regs[I] = ShiftRegs[ShiftRegsIdx];
2032       } else if (ShiftRegsIdx == -1) {
2033         Regs[I] = std::pair(HighByte, 0);
2035         Regs[I] = std::pair(ExtByte, 0);
2044   while (ShiftLeft && ShiftAmt >= 8) {
2046     for (size_t I = 0; I < Regs.size() - 1; I++) {
2047       Regs[I] = Regs[I + 1];
2051     Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
2054     Regs = Regs.drop_back(1);
2061   if (!ShiftLeft && ShiftAmt >= 8) {
2062     if (ArithmeticShift) {
2064       ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2065       Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2067           .addReg(Regs[0].first, 0, Regs[0].second)
2068           .addReg(Regs[0].first, 0, Regs[0].second);
2069       BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
2073       ShrExtendReg = ZeroReg;
2075     for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2077       for (size_t I = Regs.size() - 1; I != 0; I--) {
2078         Regs[I] = Regs[I - 1];
2082       Regs[0] = std::pair(ShrExtendReg, 0);
2085       Regs = Regs.drop_front(1);
2090   assert((ShiftAmt < 8) && "Unexpect shift amount");
2110   if (!ArithmeticShift && ShiftAmt >= 4) {
2112     for (size_t I = 0; I < Regs.size(); I++) {
2113       size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
2114       Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2118         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2124       Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2127           .addImm(ShiftLeft ? 0xf0 : 0x0f);
2129         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2133         size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2134         Regs[PrevIdx] = std::pair(R, 0);
2137       Regs[Idx] = std::pair(AndReg, 0);
2144   while (ShiftLeft && ShiftAmt) {
2146     for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
2147       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2150       if (I == (ssize_t)Regs.size() - 1) {
2153             .addReg(In, 0, InSubreg);
2157             .addReg(In, 0, InSubreg);
2159       Regs[I] = std::pair(Out, 0);
2163   while (!ShiftLeft && ShiftAmt) {
2165     for (size_t I = 0; I < Regs.size(); I++) {
2166       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2170       unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2175       Regs[I] = std::pair(Out, 0);
2180   if (ShiftAmt != 0) {
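// insertWideShift(): the 32-bit shift pseudos are expressed as four
// (register, subregister) byte slices handed to insertMultibyteShift above,
// and the two 16-bit results are reassembled with REG_SEQUENCE
// (lines 2233-2250).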
2187 AVRTargetLowering::insertWideShift(MachineInstr &MI,
2188                                    MachineBasicBlock *BB) const {
2190   const DebugLoc &dl = MI.getDebugLoc();
2194   int64_t ShiftAmt = MI.getOperand(4).getImm();
2196   switch (MI.getOpcode()) {
2209   std::array<std::pair<Register, int>, 4> Registers = {
2210       std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
2211       std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
2212       std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
2213       std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
2231       (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2233     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2238     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2245     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2250     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2258   MI.eraseFromParent();
2263   if (I->getOpcode() == AVR::COPY) {
2264     Register SrcReg = I->getOperand(1).getReg();
2265     return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
2274 MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
2275                                                 MachineBasicBlock *BB) const {
2283   BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
2291 AVRTargetLowering::insertCopyZero(MachineInstr &MI,
2292                                   MachineBasicBlock *BB) const {
2296       .add(MI.getOperand(0))
2298   MI.eraseFromParent();
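// insertAtomicArithmeticOp(): the 8- and 16-bit atomic read-modify-write
// pseudos expand into a plain load, the arithmetic opcode passed in, and a
// store through the same pointer; atomicity is presumably guaranteed
// elsewhere, as interrupts are not manipulated in the lines shown here.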
2304 MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
2305     MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
2306   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2320   const TargetRegisterClass *RC =
2321       (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2322   unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2323   unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2331   BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
2332       .add(MI.getOperand(1));
2338       .add(MI.getOperand(2));
2342       .add(MI.getOperand(1))
2351   MI.eraseFromParent();
2358   int Opc = MI.getOpcode();
2374     return insertShift(MI, MBB, STI.hasTinyEncoding());
2378     return insertWideShift(MI, MBB);
2381     return insertMul(MI, MBB);
2383     return insertCopyZero(MI, MBB);
2384   case AVR::AtomicLoadAdd8:
2385     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
2386   case AVR::AtomicLoadAdd16:
2387     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
2388   case AVR::AtomicLoadSub8:
2389     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
2390   case AVR::AtomicLoadSub16:
2391     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
2392   case AVR::AtomicLoadAnd8:
2393     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
2394   case AVR::AtomicLoadAnd16:
2395     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
2396   case AVR::AtomicLoadOr8:
2397     return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
2398   case AVR::AtomicLoadOr16:
2399     return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
2400   case AVR::AtomicLoadXor8:
2401     return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
2402   case AVR::AtomicLoadXor16:
2403     return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
2406   assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
2407          "Unexpected instr type to insert");
2428   if (FallThrough != nullptr) {
2444   unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
2458   MBB->addSuccessor(falseMBB);
2459   MBB->addSuccessor(trueMBB);
2467           MI.getOperand(0).getReg())
2473   MI.eraseFromParent();
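// The rest of the excerpt implements inline-assembly support: classifying
// constraint letters, weighting immediate constraints (the checks below cover
// ranges such as 0-63, -63..0, 0-255, -1, 8/16/24 and -6..5, matching AVR's
// I, J, M, N, O and R letters), mapping register constraints to AVR register
// classes, and resolving named registers ("r0".."r31", "sp") via StringSwitch.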
2483   if (Constraint.size() == 1) {
2485     switch (Constraint[0]) {
2528   switch (ConstraintCode[0]) {
2539   Value *CallOperandVal = info.CallOperandVal;
2544   if (!CallOperandVal) {
2549   switch (*constraint) {
2573     if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
2580     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2581       if (isUInt<6>(C->getZExtValue())) {
2587     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2588       if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
2594     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2595       if (C->getZExtValue() == 2) {
2601     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2602       if (C->getZExtValue() == 0) {
2608     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2609       if (isUInt<8>(C->getZExtValue())) {
2615     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2616       if (C->getSExtValue() == -1) {
2622     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2623       if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
2624           (C->getZExtValue() == 24)) {
2630     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2631       if (C->getZExtValue() == 1) {
2637     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2638       if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
2651 std::pair<unsigned, const TargetRegisterClass *>
2655   if (Constraint.size() == 1) {
2656     switch (Constraint[0]) {
2659         return std::make_pair(0U, &AVR::LD8loRegClass);
2660       else if (VT == MVT::i16)
2661         return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
2664       if (VT == MVT::i8 || VT == MVT::i16)
2665         return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
2669         return std::make_pair(0U, &AVR::LD8RegClass);
2670       else if (VT == MVT::i16)
2671         return std::make_pair(0U, &AVR::DLDREGSRegClass);
2675         return std::make_pair(0U, &AVR::GPR8loRegClass);
2676       else if (VT == MVT::i16)
2677         return std::make_pair(0U, &AVR::DREGSloRegClass);
2680       if (VT == MVT::i8 || VT == MVT::i16)
2681         return std::make_pair(0U, &AVR::PTRREGSRegClass);
2684       return std::make_pair(0U, &AVR::GPRSPRegClass);
2687         return std::make_pair(0U, &AVR::GPR8RegClass);
2688       else if (VT == MVT::i16)
2689         return std::make_pair(0U, &AVR::DREGSRegClass);
2694                             &AVR::GPR8RegClass);
2697       if (VT == MVT::i8 || VT == MVT::i16)
2698         return std::make_pair(0U, &AVR::IWREGSRegClass);
2702       if (VT == MVT::i8 || VT == MVT::i16)
2703         return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
2707       if (VT == MVT::i8 || VT == MVT::i16)
2708         return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
2712       if (VT == MVT::i8 || VT == MVT::i16)
2713         return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
2726                                                      std::vector<SDValue> &Ops,
2730   EVT Ty = Op.getValueType();
2733   if (Constraint.size() != 1) {
2737   char ConstraintLetter = Constraint[0];
2738   switch (ConstraintLetter) {
2756     int64_t CVal64 = C->getSExtValue();
2758     switch (ConstraintLetter) {
2760       if (!isUInt<6>(CUVal64))
2765       if (CVal64 < -63 || CVal64 > 0)
2780       if (!isUInt<8>(CUVal64))
2796       if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2806       if (CVal64 < -6 || CVal64 > 5)
2816     if (!FC || !FC->isZero())
2823   if (Result.getNode()) {
2824     Ops.push_back(Result);
2837                       .Case("r0", AVR::R0)
2838                       .Case("r1", AVR::R1)
2842                       .Case("r0", AVR::R1R0)
2843                       .Case("sp", AVR::SP)