if (!Subtarget.supportsMultiplication()) {
EVT VT = Op.getValueType();
"Expected power-of-2 shift amount");
SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
uint64_t ShiftAmount = N->getConstantOperandVal(1);
if (ShiftAmount == 16) {
switch (Op.getOpcode()) {
switch (Op.getOpcode()) {
switch (Op.getOpcode()) {
  return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
  return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
  EVT AmtVT = Amt.getValueType();
  return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
  EVT AmtVT = Amt.getValueType();
  return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
  return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
uint64_t ShiftAmount = N->getConstantOperandVal(1);
switch (Op.getOpcode()) {
if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
  Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
} else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
  Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
} else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
  Victim = DAG.getNode(AVRISD::LSLBN, dl, VT, Victim,
} else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
  Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,
} else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
  Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
} else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
  Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
} else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
  Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
  Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);
} else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
  Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
  Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);
} else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
  Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);
} else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
  Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);
Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
switch (ShiftAmount) {
  Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
  Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
  Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
if (4 <= ShiftAmount && ShiftAmount < 8)
  switch (Op.getOpcode()) {
    Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
    Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
else if (8 <= ShiftAmount && ShiftAmount < 12)
  switch (Op.getOpcode()) {
    Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
    Opc8 = AVRISD::LSLHI;
    Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
    Opc8 = AVRISD::LSRLO;
    Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
    Opc8 = AVRISD::ASRLO;
else if (12 <= ShiftAmount)
  switch (Op.getOpcode()) {
    Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
    Opc8 = AVRISD::LSLHI;
    Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
    Opc8 = AVRISD::LSRLO;
    Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
    Opc8 = AVRISD::ASRLO;
while (ShiftAmount--) {
  Victim = DAG.getNode(Opc8, dl, VT, Victim);
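// LowerDivRem: lower an [SU]DIVREM node into a call to the matching RTLIB divrem libcall for i8, i16, or i32, returning both quotient and remainder.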
unsigned Opcode = Op->getOpcode();
"Invalid opcode for Div/Rem lowering");
EVT VT = Op->getValueType(0);
Type *Ty = VT.getTypeForEVT(*DAG.getContext());
switch (VT.getSimpleVT().SimpleTy) {
  LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
  LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
  LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
SDValue InChain = DAG.getEntryNode();
TargetLowering::ArgListEntry Entry(Value, Value.getValueType().getTypeForEVT(*DAG.getContext()));
Entry.IsSExt = IsSigned;
Entry.IsZExt = !IsSigned;
Args.push_back(Entry);
RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
if (LCImpl == RTLIB::Unsupported)
DAG.getExternalSymbol(LCImpl, getPointerTy(DAG.getDataLayout()));
TargetLowering::CallLoweringInfo CLI(DAG);
    .setLibCallee(DAG.getLibcalls().getLibcallImplCallingConv(LCImpl), RetTy,
                  Callee, std::move(Args))
    .setSExtResult(IsSigned)
    .setZExtResult(!IsSigned);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
return CallInfo.first;
auto DL = DAG.getDataLayout();
auto DL = DAG.getDataLayout();
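// getAVRCmp: build the AVR flag-setting compare chain (CMP followed by carry-propagating CMPC, or TST against zero), splitting wide operands into byte/word pieces first.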
SelectionDAG &DAG, SDLoc DL) const {
assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
       "LHS and RHS have different types");
assert(((LHS.getSimpleValueType() == MVT::i16) ||
        (LHS.getSimpleValueType() == MVT::i8)) &&
       "invalid comparison type");
DAG.getIntPtrConstant(0, DL));
DAG.getIntPtrConstant(1, DL));
SDValue RHSlo = (Imm & 0xff) == 0
DAG.getIntPtrConstant(0, DL));
SDValue RHShi = (Imm & 0xff00) == 0
DAG.getIntPtrConstant(1, DL));
Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
uint64_t Imm = LHS->getAsZExtVal();
? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
DAG.getIntPtrConstant(0, DL));
? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
DAG.getIntPtrConstant(1, DL));
DAG.getIntPtrConstant(0, DL));
DAG.getIntPtrConstant(1, DL));
Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
EVT VT = LHS.getValueType();
bool UseTest = false;
switch (C->getSExtValue()) {
LHS = DAG.getConstant(0, DL, VT);
RHS = DAG.getSignedConstant(C->getSExtValue() + 1, DL, VT);
switch (C->getSExtValue()) {
LHS = DAG.getConstant(0, DL, VT);
assert((!C->isAllOnes()) && "integer overflow in comparison transform");
RHS = DAG.getConstant(C->getZExtValue() + 1, DL, VT);
if (VT == MVT::i32) {
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(1, DL));
  Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
  Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
  Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
} else if (VT == MVT::i64) {
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(0, DL));
  DAG.getIntPtrConstant(1, DL));
  DAG.getIntPtrConstant(1, DL));
  Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
  Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
  Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
  Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
  Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
} else if (VT == MVT::i8 || VT == MVT::i16) {
  Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
  LHS, DAG.getIntPtrConstant(1, DL)));
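// LowerBR_CC / LowerSELECT_CC / LowerSETCC: glue the compare above into AVRISD::BRCOND or AVRISD::SELECT_CC; SETCC is lowered as a select between the constants 1 and 0.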
return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
return DAG.getNode(AVRISD::SELECT_CC, dl, Op.getValueType(), Ops);
SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
return DAG.getNode(AVRISD::SELECT_CC, DL, Op.getValueType(), Ops);
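// LowerVASTART: store the varargs frame index into the va_list object.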
const MachineFunction &MF = DAG.getMachineFunction();
const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
auto DL = DAG.getDataLayout();
return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                    MachinePointerInfo(SV));
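// LowerINLINEASM: rebuild the INLINEASM operand list, appending the zero register as an extra operand, and replace all uses of the old node.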
SDNode *N = Op.getNode();
for (unsigned I = 0; I < N->getNumOperands(); I++) {
  if (Operand.getValueType() == MVT::Glue) {
  Ops.push_back(Operand);
Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
Ops.push_back(ZeroReg);
DAG.ReplaceAllUsesOfValueWith(Op, New);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
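// LowerOperation: dispatch each custom-lowered opcode to the routines above.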
switch (Op.getOpcode()) {
  return LowerShifts(Op, DAG);
  return LowerGlobalAddress(Op, DAG);
  return LowerBlockAddress(Op, DAG);
  return LowerBR_CC(Op, DAG);
  return LowerSELECT_CC(Op, DAG);
  return LowerSETCC(Op, DAG);
  return LowerVASTART(Op, DAG);
  return LowerDivRem(Op, DAG);
  return LowerINLINEASM(Op, DAG);
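// ReplaceNodeResults: replace a node with an illegal result type with a new node built out of custom code.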
switch (N->getOpcode()) {
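// getPreIndexedAddressParts / getPostIndexedAddressParts: match i8/i16 loads and stores whose base pointer is adjusted by -1/-2 (pre-decrement) or +1/+2 (post-increment) so AVR's auto-indexed addressing modes can be used.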
VT = LD->getMemoryVT();
Op = LD->getBasePtr().getNode();
VT = ST->getMemoryVT();
Op = ST->getBasePtr().getNode();
if (VT != MVT::i8 && VT != MVT::i16) {
int RHSC = RHS->getSExtValue();
if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
Base = Op->getOperand(0);
VT = LD->getMemoryVT();
Ptr = LD->getBasePtr();
VT = ST->getMemoryVT();
Ptr = ST->getBasePtr();
if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
if (VT != MVT::i8 && VT != MVT::i16) {
int RHSC = RHS->getSExtValue();
if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
Base = Op->getOperand(0);
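// Argument-passing registers, ordered in reverse as required by the ABI (8-bit and 16-bit pair lists, with separate shorter lists for AVRTiny).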
#include "AVRGenCallingConv.inc"
    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9,  AVR::R8};
    AVR::R22, AVR::R21, AVR::R20};
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
    AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
    AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
    AVR::R11R10, AVR::R10R9,  AVR::R9R8};
    AVR::R24R23, AVR::R23R22, AVR::R22R21, AVR::R21R20};
"8-bit and 16-bit register arrays must be of equal length");
"8-bit and 16-bit register arrays must be of equal length");
template <typename ArgT>
unsigned NumArgs = Args.size();
int RegLastIdx = -1;
bool UseStack = false;
for (unsigned i = 0; i != NumArgs;) {
  MVT VT = Args[i].VT;
  unsigned ArgIndex = Args[i].OrigArgIndex;
  for (; j != NumArgs; ++j) {
    if (Args[j].OrigArgIndex != ArgIndex)
    TotalBytes += Args[j].VT.getStoreSize();
  TotalBytes = alignTo(TotalBytes, 2);
  if (TotalBytes == 0)
  unsigned RegIdx = RegLastIdx + TotalBytes;
  RegLastIdx = RegIdx;
  if (RegIdx >= RegList8.size()) {
  for (; i != j; ++i) {
    MVT VT = Args[i].VT;
    if (VT == MVT::i8) {
    } else if (VT == MVT::i16) {
      "calling convention can only manage i8 and i16 types");
    assert(Reg && "register not available in calling convention");
template <typename ArgT>
unsigned TotalBytes = 0;
for (const ArgT &Arg : Args) {
  TotalBytes += Arg.VT.getStoreSize();
template <typename ArgT>
unsigned NumArgs = Args.size();
assert(TotalBytes <= 4 &&
       "return values greater than 4 bytes cannot be lowered on AVRTiny");
assert(TotalBytes <= 8 &&
       "return values greater than 8 bytes cannot be lowered on AVR");
if (TotalBytes > 4) {
TotalBytes = alignTo(TotalBytes, 2);
int RegIdx = TotalBytes - 1;
for (unsigned i = 0; i != NumArgs; ++i) {
  MVT VT = Args[i].VT;
  if (VT == MVT::i8) {
  } else if (VT == MVT::i16) {
  assert(Reg && "register not available in calling convention");
SDValue AVRTargetLowering::LowerFormalArguments(
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
auto DL = DAG.getDataLayout();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
for (CCValAssign &VA : ArgLocs) {
  if (VA.isRegLoc()) {
    EVT RegVT = VA.getLocVT();
    const TargetRegisterClass *RC;
    if (RegVT == MVT::i8) {
      RC = &AVR::GPR8RegClass;
    } else if (RegVT == MVT::i16) {
      RC = &AVR::DREGSRegClass;
    ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    switch (VA.getLocInfo()) {
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      DAG.getValueType(VA.getValVT()));
      ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      DAG.getValueType(VA.getValVT()));
      ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
    InVals.push_back(ArgValue);
    EVT LocVT = VA.getLocVT();
    int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                   VA.getLocMemOffset(), true);
    InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
unsigned StackSize = CCInfo.getStackSize();
AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
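// LowerCall: analyze the outgoing arguments, copy register arguments into place, store stack arguments relative to SP, and emit the AVRISD::CALL sequence.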
SelectionDAG &DAG = CLI.DAG;
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
bool &isTailCall = CLI.IsTailCall;
bool isVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
const GlobalValue *GV = G->getGlobal();
DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
} else if (const ExternalSymbolSDNode *ES =
Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
unsigned NumBytes = CCInfo.getStackSize();
Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
bool HasStackArgs = false;
for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
  CCValAssign &VA = ArgLocs[AI];
  EVT RegVT = VA.getLocVT();
  switch (VA.getLocInfo()) {
  if (VA.isMemLoc()) {
    HasStackArgs = true;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
for (; AI != AE; AI++) {
  CCValAssign &VA = ArgLocs[AI];
  DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
  DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
  MemOpChains.push_back(
      DAG.getStore(Chain, DL, Arg, PtrOff,
if (!MemOpChains.empty())
for (auto Reg : RegsToPass) {
  Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);
  InGlue = Chain.getValue(1);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Ops.push_back(Chain);
Ops.push_back(Callee);
for (auto Reg : RegsToPass) {
  Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
Ops.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask =
    TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
if (InGlue.getNode()) {
  Ops.push_back(InGlue);
Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
InGlue = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);
InGlue = Chain.getValue(1);
return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,
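// LowerCallResult: copy the call's return values out of their physical registers.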
SDValue AVRTargetLowering::LowerCallResult(
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
for (CCValAssign const &RVLoc : RVLocs) {
  Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
  InGlue = Chain.getValue(2);
  InVals.push_back(Chain.getValue(0));
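// CanLowerReturn: return values are passed in registers only if they fit the budget checked below (8 bytes, or 4 with the Tiny encoding).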
bool AVRTargetLowering::CanLowerReturn(
    const Type *RetTy) const {
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
return TotalBytes <= (unsigned)(Subtarget.hasTinyEncoding() ? 4 : 8);
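// LowerReturn: copy return values into the return registers and emit RET_GLUE, or RETI_GLUE for interrupt and signal handlers.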
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
MachineFunction &MF = DAG.getMachineFunction();
CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
  CCValAssign &VA = RVLocs[i];
  assert(VA.isRegLoc() && "Can only return in registers!");
  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
  Glue = Chain.getValue(1);
  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
if (!AFI->isInterruptOrSignalHandler()) {
  RetOps.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));
AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_GLUE : AVRISD::RET_GLUE;
if (Glue.getNode()) {
  RetOps.push_back(Glue);
return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
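// insertShift: expand a variable-amount shift pseudo into a loop (LoopBB/CheckBB/RemBB) that shifts by one bit per iteration and decrements the amount register.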
const TargetRegisterClass *RC;
bool HasRepeatedOperand = false;
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
switch (MI.getOpcode()) {
  RC = &AVR::GPR8RegClass;
  HasRepeatedOperand = true;
  RC = &AVR::DREGSRegClass;
  RC = &AVR::GPR8RegClass;
  RC = &AVR::DREGSRegClass;
  RC = &AVR::GPR8RegClass;
  RC = &AVR::DREGSRegClass;
  Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
  RC = &AVR::GPR8RegClass;
  RC = &AVR::DREGSRegClass;
  RC = &AVR::GPR8RegClass;
  RC = &AVR::DREGSRegClass;
const BasicBlock *LLVM_BB = BB->getBasicBlock();
for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(I, LoopBB);
F->insert(I, CheckBB);
F->insert(I, RemBB);
RemBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(CheckBB);
LoopBB->addSuccessor(CheckBB);
CheckBB->addSuccessor(LoopBB);
CheckBB->addSuccessor(RemBB);
Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
Register ShiftReg = RI.createVirtualRegister(RC);
Register ShiftReg2 = RI.createVirtualRegister(RC);
Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
if (HasRepeatedOperand)
  ShiftMI.addReg(ShiftReg);
BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
MI.eraseFromParent();
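// insertMultibyteShift: emit a constant-amount wide shift a byte (register renumbering), a nibble (SWAP/ANDI), or a bit (LSL/ROL, LSR/ROR, ASR) at a time.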
Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
if (ShiftLeft && (ShiftAmt % 8) >= 6) {
  size_t ShiftRegsOffset = ShiftAmt / 8;
  size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
  Regs.slice(ShiftRegsOffset, ShiftRegsSize);
  Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
  if (ShiftAmt % 8 == 6) {
    Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    LowByte = NewLowByte;
  for (size_t I = 0; I < Regs.size(); I++) {
    int ShiftRegsIdx = I + 1;
    if (ShiftRegsIdx < (int)ShiftRegs.size()) {
      Regs[I] = ShiftRegs[ShiftRegsIdx];
    } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
      Regs[I] = std::pair(LowByte, 0);
      Regs[I] = std::pair(ZeroReg, 0);
if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
  size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
  Regs.slice(0, ShiftRegsSize);
  Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
  if (ArithmeticShift) {
  if (ShiftAmt % 8 == 6) {
    Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
  for (int I = Regs.size() - 1; I >= 0; I--) {
    int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
    if (ShiftRegsIdx >= 0) {
      Regs[I] = ShiftRegs[ShiftRegsIdx];
    } else if (ShiftRegsIdx == -1) {
      Regs[I] = std::pair(HighByte, 0);
      Regs[I] = std::pair(ExtByte, 0);
while (ShiftLeft && ShiftAmt >= 8) {
  for (size_t I = 0; I < Regs.size() - 1; I++) {
    Regs[I] = Regs[I + 1];
  Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
  Regs = Regs.drop_back(1);
if (!ShiftLeft && ShiftAmt >= 8) {
  if (ArithmeticShift) {
    ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
        .addReg(Regs[0].first, 0, Regs[0].second)
        .addReg(Regs[0].first, 0, Regs[0].second);
    BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
    ShrExtendReg = ZeroReg;
  for (; ShiftAmt >= 8; ShiftAmt -= 8) {
    for (size_t I = Regs.size() - 1; I != 0; I--) {
      Regs[I] = Regs[I - 1];
    Regs[0] = std::pair(ShrExtendReg, 0);
  Regs = Regs.drop_front(1);
assert((ShiftAmt < 8) && "Unexpect shift amount");
if (!ArithmeticShift && ShiftAmt >= 4) {
  for (size_t I = 0; I < Regs.size(); I++) {
    size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
    Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
        .addReg(Regs[Idx].first, 0, Regs[Idx].second);
    Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
        .addImm(ShiftLeft ? 0xf0 : 0x0f);
    Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
    Regs[PrevIdx] = std::pair(R, 0);
    Regs[Idx] = std::pair(AndReg, 0);
while (ShiftLeft && ShiftAmt) {
  for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
    Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    if (I == (ssize_t)Regs.size() - 1) {
          .addReg(In, 0, InSubreg);
          .addReg(In, 0, InSubreg);
    Regs[I] = std::pair(Out, 0);
while (!ShiftLeft && ShiftAmt) {
  for (size_t I = 0; I < Regs.size(); I++) {
    Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
    Regs[I] = std::pair(Out, 0);
if (ShiftAmt != 0) {
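// insertWideShift: lower a 32-bit shift pseudo by splitting both operand pairs into 8-bit registers and delegating to insertMultibyteShift.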
AVRTargetLowering::insertWideShift(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {
int64_t ShiftAmt = MI.getOperand(4).getImm();
switch (MI.getOpcode()) {
std::array<std::pair<Register, int>, 4> Registers = {
    std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
    std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
    std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
    std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
(Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
MI.eraseFromParent();
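// isCopyMulResult: check whether an instruction copies a multiplication result out of R0 or R1.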
if (I->getOpcode() == AVR::COPY) {
  Register SrcReg = I->getOperand(1).getReg();
  return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
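// insertMul: emit the multiplication and then clear R1 with EOR, since MUL clobbers the fixed zero register.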
MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
    .add(MI.getOperand(0))
MI.eraseFromParent();
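// insertAtomicArithmeticOp: expand an atomic read-modify-write pseudo into a load, the arithmetic operation, and a store of the requested width.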
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
const TargetRegisterClass *RC =
    (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
    .add(MI.getOperand(1));
    .add(MI.getOperand(2));
    .add(MI.getOperand(1))
MI.eraseFromParent();
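// EmitInstrWithCustomInserter: route each custom-inserted pseudo (shifts, wide shifts, MUL, CopyZero, atomic RMW ops, Select8/Select16) to its expansion helper.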
int Opc = MI.getOpcode();
  return insertShift(MI, MBB, STI.hasTinyEncoding());
  return insertWideShift(MI, MBB);
  return insertMul(MI, MBB);
  return insertCopyZero(MI, MBB);
case AVR::AtomicLoadAdd8:
  return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
case AVR::AtomicLoadAdd16:
  return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
case AVR::AtomicLoadSub8:
  return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
case AVR::AtomicLoadSub16:
  return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
case AVR::AtomicLoadAnd8:
  return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
case AVR::AtomicLoadAnd16:
  return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
case AVR::AtomicLoadOr8:
  return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
case AVR::AtomicLoadOr16:
  return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
case AVR::AtomicLoadXor8:
  return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
case AVR::AtomicLoadXor16:
  return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
       "Unexpected instr type to insert");
if (FallThrough != nullptr) {
unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
MBB->addSuccessor(falseMBB);
MBB->addSuccessor(trueMBB);
MI.getOperand(0).getReg())
MI.eraseFromParent();
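// Inline-assembly constraint support: classify constraint strings and score how well a constant operand matches each AVR-specific constraint range.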
if (Constraint.size() == 1) {
  switch (Constraint[0]) {
switch (ConstraintCode[0]) {
Value *CallOperandVal = info.CallOperandVal;
if (!CallOperandVal) {
switch (*constraint) {
if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
if (C->getZExtValue() == 2) {
if (C->getZExtValue() == 0) {
if (C->getSExtValue() == -1) {
if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
    (C->getZExtValue() == 24)) {
if (C->getZExtValue() == 1) {
if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
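// getRegForInlineAsmConstraint: map register-class constraint letters (and the x/y/z pointer pairs R27:R26, R29:R28, R31:R30) to AVR register classes for i8 and i16 values.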
std::pair<unsigned, const TargetRegisterClass *>
if (Constraint.size() == 1) {
  switch (Constraint[0]) {
      return std::make_pair(0U, &AVR::LD8loRegClass);
    else if (VT == MVT::i16)
      return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
    if (VT == MVT::i8 || VT == MVT::i16)
      return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
      return std::make_pair(0U, &AVR::LD8RegClass);
    else if (VT == MVT::i16)
      return std::make_pair(0U, &AVR::DLDREGSRegClass);
      return std::make_pair(0U, &AVR::GPR8loRegClass);
    else if (VT == MVT::i16)
      return std::make_pair(0U, &AVR::DREGSloRegClass);
    if (VT == MVT::i8 || VT == MVT::i16)
      return std::make_pair(0U, &AVR::PTRREGSRegClass);
      return std::make_pair(0U, &AVR::GPRSPRegClass);
      return std::make_pair(0U, &AVR::GPR8RegClass);
    else if (VT == MVT::i16)
      return std::make_pair(0U, &AVR::DREGSRegClass);
      return std::make_pair(unsigned(Subtarget.getTmpRegister()),
                            &AVR::GPR8RegClass);
    if (VT == MVT::i8 || VT == MVT::i16)
      return std::make_pair(0U, &AVR::IWREGSRegClass);
    if (VT == MVT::i8 || VT == MVT::i16)
      return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
    if (VT == MVT::i8 || VT == MVT::i16)
      return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
    if (VT == MVT::i8 || VT == MVT::i16)
      return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
    Subtarget.getRegisterInfo(), Constraint, VT);
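// LowerAsmOperandForConstraint: validate immediate operands against the constraint's allowed range before handing them to the generic lowering.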
std::vector<SDValue> &Ops,
EVT Ty = Op.getValueType();
if (Constraint.size() != 1) {
char ConstraintLetter = Constraint[0];
switch (ConstraintLetter) {
int64_t CVal64 = C->getSExtValue();
switch (ConstraintLetter) {
  if (CVal64 < -63 || CVal64 > 0)
  if (Ty.getSimpleVT() == MVT::i8) {
  if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
  if (CVal64 < -6 || CVal64 > 5)
  if (!FC || !FC->isZero())
if (Result.getNode()) {
  Ops.push_back(Result);
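// getRegisterByName: translate register names ("r0", "r1", "sp", ...) used by llvm.read_register/llvm.write_register into physical registers.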
.Case("r0", AVR::R0)
.Case("r1", AVR::R1)
.Case("r0", AVR::R1R0)
.Case("sp", AVR::SP)