177 if (!
Subtarget.supportsMultiplication()) {
214 EVT VT =
Op.getValueType();
217 "Expected power-of-2 shift amount");
225 SDVTList ResTys = DAG.
getVTList(MVT::i16, MVT::i16);
232 uint64_t ShiftAmount =
N->getConstantOperandVal(1);
233 if (ShiftAmount == 16) {
238 switch (
Op.getOpcode()) {
251 switch (
Op.getOpcode()) {
271 switch (
Op.getOpcode()) {
275 return DAG.
getNode(AVRISD::LSLLOOP, dl, VT,
N->getOperand(0),
278 return DAG.
getNode(AVRISD::LSRLOOP, dl, VT,
N->getOperand(0),
282 EVT AmtVT = Amt.getValueType();
285 return DAG.
getNode(AVRISD::ROLLOOP, dl, VT,
N->getOperand(0), Amt);
289 EVT AmtVT = Amt.getValueType();
292 return DAG.
getNode(AVRISD::RORLOOP, dl, VT,
N->getOperand(0), Amt);
295 return DAG.
getNode(AVRISD::ASRLOOP, dl, VT,
N->getOperand(0),
300 uint64_t ShiftAmount =
N->getConstantOperandVal(1);
303 switch (
Op.getOpcode()) {
327 if (
Op.getOpcode() ==
ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
329 Victim = DAG.
getNode(AVRISD::SWAP, dl, VT, Victim);
333 }
else if (
Op.getOpcode() ==
ISD::SRL && 4 <= ShiftAmount &&
336 Victim = DAG.
getNode(AVRISD::SWAP, dl, VT, Victim);
340 }
else if (
Op.getOpcode() ==
ISD::SHL && ShiftAmount == 7) {
342 Victim = DAG.
getNode(AVRISD::LSLBN, dl, VT, Victim,
345 }
else if (
Op.getOpcode() ==
ISD::SRL && ShiftAmount == 7) {
347 Victim = DAG.
getNode(AVRISD::LSRBN, dl, VT, Victim,
350 }
else if (
Op.getOpcode() ==
ISD::SRA && ShiftAmount == 6) {
352 Victim = DAG.
getNode(AVRISD::ASRBN, dl, VT, Victim,
355 }
else if (
Op.getOpcode() ==
ISD::SRA && ShiftAmount == 7) {
357 Victim = DAG.
getNode(AVRISD::ASRBN, dl, VT, Victim,
360 }
else if (
Op.getOpcode() ==
ISD::ROTL && ShiftAmount == 3) {
362 Victim = DAG.
getNode(AVRISD::SWAP, dl, VT, Victim);
363 Victim = DAG.
getNode(AVRISD::ROR, dl, VT, Victim);
365 }
else if (
Op.getOpcode() ==
ISD::ROTR && ShiftAmount == 3) {
367 Victim = DAG.
getNode(AVRISD::SWAP, dl, VT, Victim);
368 Victim = DAG.
getNode(AVRISD::ROL, dl, VT, Victim);
370 }
else if (
Op.getOpcode() ==
ISD::ROTL && ShiftAmount == 7) {
372 Victim = DAG.
getNode(AVRISD::ROR, dl, VT, Victim);
374 }
else if (
Op.getOpcode() ==
ISD::ROTR && ShiftAmount == 7) {
376 Victim = DAG.
getNode(AVRISD::ROL, dl, VT, Victim);
381 Victim = DAG.
getNode(AVRISD::SWAP, dl, VT, Victim);
387 switch (ShiftAmount) {
389 Victim = DAG.
getNode(AVRISD::ASRWN, dl, VT, Victim,
394 Victim = DAG.
getNode(AVRISD::ASRWN, dl, VT, Victim,
399 Victim = DAG.
getNode(AVRISD::ASRWN, dl, VT, Victim,
406 if (4 <= ShiftAmount && ShiftAmount < 8)
407 switch (
Op.getOpcode()) {
409 Victim = DAG.
getNode(AVRISD::LSLWN, dl, VT, Victim,
414 Victim = DAG.
getNode(AVRISD::LSRWN, dl, VT, Victim,
421 else if (8 <= ShiftAmount && ShiftAmount < 12)
422 switch (
Op.getOpcode()) {
424 Victim = DAG.
getNode(AVRISD::LSLWN, dl, VT, Victim,
428 Opc8 = AVRISD::LSLHI;
431 Victim = DAG.
getNode(AVRISD::LSRWN, dl, VT, Victim,
435 Opc8 = AVRISD::LSRLO;
438 Victim = DAG.
getNode(AVRISD::ASRWN, dl, VT, Victim,
442 Opc8 = AVRISD::ASRLO;
447 else if (12 <= ShiftAmount)
448 switch (
Op.getOpcode()) {
450 Victim = DAG.
getNode(AVRISD::LSLWN, dl, VT, Victim,
454 Opc8 = AVRISD::LSLHI;
457 Victim = DAG.
getNode(AVRISD::LSRWN, dl, VT, Victim,
461 Opc8 = AVRISD::LSRLO;
464 Victim = DAG.
getNode(AVRISD::ASRWN, dl, VT, Victim,
468 Opc8 = AVRISD::ASRLO;
475 while (ShiftAmount--) {
476 Victim = DAG.
getNode(Opc8, dl, VT, Victim);
483 unsigned Opcode =
Op->getOpcode();
485 "Invalid opcode for Div/Rem lowering");
487 EVT VT =
Op->getValueType(0);
488 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
491 switch (VT.getSimpleVT().SimpleTy) {
495 LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
498 LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
501 LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
505 SDValue InChain = DAG.getEntryNode();
509 TargetLowering::ArgListEntry
Entry(
510 Value,
Value.getValueType().getTypeForEVT(*DAG.getContext()));
511 Entry.IsSExt = IsSigned;
512 Entry.IsZExt = !IsSigned;
513 Args.push_back(Entry);
516 RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
517 if (LCImpl == RTLIB::Unsupported)
521 DAG.getExternalSymbol(LCImpl,
getPointerTy(DAG.getDataLayout()));
526 TargetLowering::CallLoweringInfo CLI(DAG);
529 .setLibCallee(DAG.getLibcalls().getLibcallImplCallingConv(LCImpl), RetTy,
530 Callee, std::move(Args))
532 .setSExtResult(IsSigned)
533 .setZExtResult(!IsSigned);
535 std::pair<SDValue, SDValue> CallInfo =
LowerCallTo(CLI);
536 return CallInfo.first;
541 auto DL = DAG.getDataLayout();
554 auto DL = DAG.getDataLayout();
584 SelectionDAG &DAG, SDLoc
DL)
const {
585 assert((
LHS.getSimpleValueType() ==
RHS.getSimpleValueType()) &&
586 "LHS and RHS have different types");
587 assert(((
LHS.getSimpleValueType() == MVT::i16) ||
588 (
LHS.getSimpleValueType() == MVT::i8)) &&
589 "invalid comparison type");
598 DAG.getIntPtrConstant(0,
DL));
600 DAG.getIntPtrConstant(1,
DL));
601 SDValue RHSlo = (Imm & 0xff) == 0
604 DAG.getIntPtrConstant(0,
DL));
605 SDValue RHShi = (Imm & 0xff00) == 0
608 DAG.getIntPtrConstant(1,
DL));
609 Cmp = DAG.getNode(AVRISD::CMP,
DL, MVT::Glue, LHSlo, RHSlo);
610 Cmp = DAG.getNode(AVRISD::CMPC,
DL, MVT::Glue, LHShi, RHShi, Cmp);
614 uint64_t
Imm =
LHS->getAsZExtVal();
616 ? DAG.getRegister(
Subtarget.getZeroRegister(), MVT::i8)
618 DAG.getIntPtrConstant(0,
DL));
620 ? DAG.getRegister(
Subtarget.getZeroRegister(), MVT::i8)
622 DAG.getIntPtrConstant(1,
DL));
624 DAG.getIntPtrConstant(0,
DL));
626 DAG.getIntPtrConstant(1,
DL));
627 Cmp = DAG.getNode(AVRISD::CMP,
DL, MVT::Glue, LHSlo, RHSlo);
628 Cmp = DAG.getNode(AVRISD::CMPC,
DL, MVT::Glue, LHShi, RHShi, Cmp);
631 Cmp = DAG.getNode(AVRISD::CMP,
DL, MVT::Glue,
LHS,
RHS);
643 EVT VT =
LHS.getValueType();
644 bool UseTest =
false;
657 switch (
C->getSExtValue()) {
669 LHS = DAG.getConstant(0,
DL, VT);
676 RHS = DAG.getSignedConstant(
C->getSExtValue() + 1,
DL, VT);
690 switch (
C->getSExtValue()) {
695 LHS = DAG.getConstant(0,
DL, VT);
720 if (
C->getConstantIntValue()->isMaxValue(
false)) {
724 RHS = DAG.getConstant(
C->getZExtValue() + 1,
DL, VT);
738 if (VT == MVT::i32) {
740 DAG.getIntPtrConstant(0,
DL));
742 DAG.getIntPtrConstant(1,
DL));
744 DAG.getIntPtrConstant(0,
DL));
746 DAG.getIntPtrConstant(1,
DL));
751 DAG.getIntPtrConstant(1,
DL));
752 Cmp = DAG.getNode(AVRISD::TST,
DL, MVT::Glue, Top);
754 Cmp = getAVRCmp(LHSlo, RHSlo, DAG,
DL);
755 Cmp = DAG.getNode(AVRISD::CMPC,
DL, MVT::Glue, LHShi, RHShi, Cmp);
757 }
else if (VT == MVT::i64) {
759 DAG.getIntPtrConstant(0,
DL));
761 DAG.getIntPtrConstant(1,
DL));
764 DAG.getIntPtrConstant(0,
DL));
766 DAG.getIntPtrConstant(1,
DL));
768 DAG.getIntPtrConstant(0,
DL));
770 DAG.getIntPtrConstant(1,
DL));
773 DAG.getIntPtrConstant(0,
DL));
775 DAG.getIntPtrConstant(1,
DL));
778 DAG.getIntPtrConstant(0,
DL));
780 DAG.getIntPtrConstant(1,
DL));
782 DAG.getIntPtrConstant(0,
DL));
784 DAG.getIntPtrConstant(1,
DL));
789 DAG.getIntPtrConstant(1,
DL));
790 Cmp = DAG.getNode(AVRISD::TST,
DL, MVT::Glue, Top);
792 Cmp = getAVRCmp(LHS0, RHS0, DAG,
DL);
793 Cmp = DAG.getNode(AVRISD::CMPC,
DL, MVT::Glue, LHS1, RHS1, Cmp);
794 Cmp = DAG.getNode(AVRISD::CMPC,
DL, MVT::Glue, LHS2, RHS2, Cmp);
795 Cmp = DAG.getNode(AVRISD::CMPC,
DL, MVT::Glue, LHS3, RHS3, Cmp);
797 }
else if (VT == MVT::i8 || VT == MVT::i16) {
800 Cmp = DAG.getNode(AVRISD::TST,
DL, MVT::Glue,
804 LHS, DAG.getIntPtrConstant(1,
DL)));
831 return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
848 return DAG.getNode(AVRISD::SELECT_CC, dl,
Op.getValueType(),
Ops);
860 SDValue TrueV = DAG.getConstant(1,
DL,
Op.getValueType());
861 SDValue FalseV = DAG.getConstant(0,
DL,
Op.getValueType());
864 return DAG.getNode(AVRISD::SELECT_CC,
DL,
Op.getValueType(),
Ops);
868 const MachineFunction &MF = DAG.getMachineFunction();
869 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
871 auto DL = DAG.getDataLayout();
878 return DAG.getStore(
Op.getOperand(0), dl, FI,
Op.getOperand(1),
879 MachinePointerInfo(SV));
900 SDNode *
N =
Op.getNode();
902 for (
unsigned I = 0;
I <
N->getNumOperands();
I++) {
904 if (Operand.getValueType() == MVT::Glue) {
909 Ops.push_back(Operand);
913 Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
914 Ops.push_back(ZeroReg);
922 DAG.ReplaceAllUsesOfValueWith(
Op, New);
923 DAG.ReplaceAllUsesOfValueWith(
Op.getValue(1),
New.getValue(1));
929 switch (
Op.getOpcode()) {
937 return LowerShifts(
Op, DAG);
939 return LowerGlobalAddress(
Op, DAG);
941 return LowerBlockAddress(
Op, DAG);
943 return LowerBR_CC(
Op, DAG);
945 return LowerSELECT_CC(
Op, DAG);
947 return LowerSETCC(
Op, DAG);
949 return LowerVASTART(
Op, DAG);
952 return LowerDivRem(
Op, DAG);
954 return LowerINLINEASM(
Op, DAG);
967 switch (
N->getOpcode()) {
1030 VT = LD->getMemoryVT();
1031 Op = LD->getBasePtr().getNode();
1038 VT = ST->getMemoryVT();
1039 Op = ST->getBasePtr().getNode();
1047 if (VT != MVT::i8 && VT != MVT::i16) {
1056 int RHSC =
RHS->getSExtValue();
1060 if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
1064 Base =
Op->getOperand(0);
1087 VT = LD->getMemoryVT();
1088 Ptr = LD->getBasePtr();
1092 VT = ST->getMemoryVT();
1093 Ptr = ST->getBasePtr();
1101 if (VT == MVT::i16 && !
Subtarget.hasLowByteFirst())
1107 if (VT != MVT::i8 && VT != MVT::i16) {
1116 int RHSC =
RHS->getSExtValue();
1119 if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
1129 Base =
Op->getOperand(0);
1154#include "AVRGenCallingConv.inc"
1159 AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
1160 AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
1161 AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
1163 AVR::R22, AVR::R21, AVR::R20};
1165 AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
1166 AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
1167 AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
1168 AVR::R11R10, AVR::R10R9, AVR::R9R8};
1170 AVR::R24R23, AVR::R23R22,
1171 AVR::R22R21, AVR::R21R20};
1174 "8-bit and 16-bit register arrays must be of equal length");
1176 "8-bit and 16-bit register arrays must be of equal length");
1182template <
typename ArgT>
1199 unsigned NumArgs = Args.size();
1202 int RegLastIdx = -1;
1204 bool UseStack =
false;
1205 for (
unsigned i = 0; i != NumArgs;) {
1206 MVT VT = Args[i].VT;
1211 unsigned ArgIndex = Args[i].OrigArgIndex;
1214 for (; j != NumArgs; ++j) {
1215 if (Args[j].OrigArgIndex != ArgIndex)
1217 TotalBytes += Args[j].VT.getStoreSize();
1220 TotalBytes =
alignTo(TotalBytes, 2);
1222 if (TotalBytes == 0)
1225 unsigned RegIdx = RegLastIdx + TotalBytes;
1226 RegLastIdx = RegIdx;
1228 if (RegIdx >= RegList8.
size()) {
1231 for (; i != j; ++i) {
1232 MVT VT = Args[i].VT;
1242 if (VT == MVT::i8) {
1244 }
else if (VT == MVT::i16) {
1248 "calling convention can only manage i8 and i16 types");
1250 assert(
Reg &&
"register not available in calling convention");
1261template <
typename ArgT>
1264 unsigned TotalBytes = 0;
1266 for (
const ArgT &Arg : Args) {
1267 TotalBytes += Arg.VT.getStoreSize();
1275template <
typename ArgT>
1278 unsigned NumArgs = Args.size();
1282 assert(TotalBytes <= 4 &&
1283 "return values greater than 4 bytes cannot be lowered on AVRTiny");
1285 assert(TotalBytes <= 8 &&
1286 "return values greater than 8 bytes cannot be lowered on AVR");
1301 if (TotalBytes > 4) {
1304 TotalBytes =
alignTo(TotalBytes, 2);
1308 int RegIdx = TotalBytes - 1;
1309 for (
unsigned i = 0; i != NumArgs; ++i) {
1310 MVT VT = Args[i].VT;
1312 if (VT == MVT::i8) {
1314 }
else if (VT == MVT::i16) {
1319 assert(
Reg &&
"register not available in calling convention");
1326SDValue AVRTargetLowering::LowerFormalArguments(
1328 const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl,
1329 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals)
const {
1330 MachineFunction &MF = DAG.getMachineFunction();
1331 MachineFrameInfo &MFI = MF.getFrameInfo();
1332 auto DL = DAG.getDataLayout();
1336 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1341 CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
1348 for (CCValAssign &VA : ArgLocs) {
1351 if (VA.isRegLoc()) {
1352 EVT RegVT = VA.getLocVT();
1353 const TargetRegisterClass *RC;
1354 if (RegVT == MVT::i8) {
1355 RC = &AVR::GPR8RegClass;
1356 }
else if (RegVT == MVT::i16) {
1357 RC = &AVR::DREGSRegClass;
1363 ArgValue = DAG.getCopyFromReg(Chain, dl,
Reg, RegVT);
1371 switch (VA.getLocInfo()) {
1377 ArgValue = DAG.getNode(
ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1381 DAG.getValueType(VA.getValVT()));
1382 ArgValue = DAG.getNode(
ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1386 DAG.getValueType(VA.getValVT()));
1387 ArgValue = DAG.getNode(
ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1391 InVals.push_back(ArgValue);
1396 EVT LocVT = VA.getLocVT();
1399 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1400 VA.getLocMemOffset(),
true);
1405 InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1413 unsigned StackSize = CCInfo.getStackSize();
1414 AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1416 AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize,
true));
1428 SelectionDAG &DAG = CLI.DAG;
1430 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1431 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1432 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1435 bool &isTailCall = CLI.IsTailCall;
1437 bool isVarArg = CLI.IsVarArg;
1439 MachineFunction &MF = DAG.getMachineFunction();
1446 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1454 const GlobalValue *GV =
G->getGlobal();
1458 DAG.getTargetGlobalAddress(GV,
DL,
getPointerTy(DAG.getDataLayout()));
1459 }
else if (
const ExternalSymbolSDNode *ES =
1461 Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1467 CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1474 unsigned NumBytes = CCInfo.getStackSize();
1476 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0,
DL);
1482 bool HasStackArgs =
false;
1483 for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1484 CCValAssign &VA = ArgLocs[AI];
1485 EVT RegVT = VA.getLocVT();
1489 switch (VA.getLocInfo()) {
1510 if (VA.isMemLoc()) {
1511 HasStackArgs =
true;
1517 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1528 for (; AI != AE; AI++) {
1529 CCValAssign &VA = ArgLocs[AI];
1537 DAG.getRegister(AVR::SP,
getPointerTy(DAG.getDataLayout())),
1538 DAG.getIntPtrConstant(VA.getLocMemOffset() + 1,
DL));
1540 MemOpChains.push_back(
1541 DAG.getStore(Chain,
DL, Arg, PtrOff,
1545 if (!MemOpChains.empty())
1553 for (
auto Reg : RegsToPass) {
1554 Chain = DAG.getCopyToReg(Chain,
DL,
Reg.first,
Reg.second, InGlue);
1555 InGlue = Chain.getValue(1);
1559 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1561 Ops.push_back(Chain);
1562 Ops.push_back(Callee);
1566 for (
auto Reg : RegsToPass) {
1567 Ops.push_back(DAG.getRegister(
Reg.first,
Reg.second.getValueType()));
1572 Ops.push_back(DAG.getRegister(
Subtarget.getZeroRegister(), MVT::i8));
1575 const TargetRegisterInfo *
TRI =
Subtarget.getRegisterInfo();
1576 const uint32_t *
Mask =
1577 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1578 assert(Mask &&
"Missing call preserved mask for calling convention");
1579 Ops.push_back(DAG.getRegisterMask(Mask));
1581 if (InGlue.getNode()) {
1582 Ops.push_back(InGlue);
1585 Chain = DAG.getNode(AVRISD::CALL,
DL, NodeTys,
Ops);
1586 InGlue = Chain.getValue(1);
1589 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue,
DL);
1592 InGlue = Chain.getValue(1);
1597 return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins,
DL, DAG,
1604SDValue AVRTargetLowering::LowerCallResult(
1611 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1616 CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1622 for (CCValAssign
const &RVLoc : RVLocs) {
1623 Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1626 InGlue = Chain.getValue(2);
1627 InVals.push_back(Chain.getValue(0));
1637bool AVRTargetLowering::CanLowerReturn(
1640 const Type *RetTy)
const {
1643 CCState CCInfo(CallConv, isVarArg, MF, RVLocs,
Context);
1644 return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
1648 return TotalBytes <= (unsigned)(
Subtarget.hasTinyEncoding() ? 4 : 8);
1661 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1664 MachineFunction &MF = DAG.getMachineFunction();
1668 CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1676 for (
unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1677 CCValAssign &VA = RVLocs[i];
1678 assert(VA.isRegLoc() &&
"Can only return in registers!");
1680 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1683 Glue = Chain.getValue(1);
1684 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1689 if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
1693 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1695 if (!AFI->isInterruptOrSignalHandler()) {
1700 RetOps.push_back(DAG.getRegister(
Subtarget.getZeroRegister(), MVT::i8));
1704 AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_GLUE : AVRISD::RET_GLUE;
1708 if (Glue.getNode()) {
1709 RetOps.push_back(Glue);
1712 return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
1723 const TargetRegisterClass *RC;
1724 bool HasRepeatedOperand =
false;
1727 const TargetInstrInfo &
TII = *
Subtarget.getInstrInfo();
1730 switch (
MI.getOpcode()) {
1735 RC = &AVR::GPR8RegClass;
1736 HasRepeatedOperand =
true;
1740 RC = &AVR::DREGSRegClass;
1744 RC = &AVR::GPR8RegClass;
1748 RC = &AVR::DREGSRegClass;
1752 RC = &AVR::GPR8RegClass;
1756 RC = &AVR::DREGSRegClass;
1759 Opc =
Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
1760 RC = &AVR::GPR8RegClass;
1764 RC = &AVR::DREGSRegClass;
1768 RC = &AVR::GPR8RegClass;
1772 RC = &AVR::DREGSRegClass;
1776 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1779 for (
I = BB->getIterator();
I !=
F->end() && &(*
I) != BB; ++
I)
1785 MachineBasicBlock *LoopBB =
F->CreateMachineBasicBlock(LLVM_BB);
1786 MachineBasicBlock *CheckBB =
F->CreateMachineBasicBlock(LLVM_BB);
1787 MachineBasicBlock *RemBB =
F->CreateMachineBasicBlock(LLVM_BB);
1789 F->insert(
I, LoopBB);
1790 F->insert(
I, CheckBB);
1791 F->insert(
I, RemBB);
1797 RemBB->transferSuccessorsAndUpdatePHIs(BB);
1800 BB->addSuccessor(CheckBB);
1801 LoopBB->addSuccessor(CheckBB);
1802 CheckBB->addSuccessor(LoopBB);
1803 CheckBB->addSuccessor(RemBB);
1805 Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
1806 Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
1807 Register ShiftReg = RI.createVirtualRegister(RC);
1808 Register ShiftReg2 = RI.createVirtualRegister(RC);
1809 Register ShiftAmtSrcReg =
MI.getOperand(2).getReg();
1820 if (HasRepeatedOperand)
1821 ShiftMI.
addReg(ShiftReg);
1829 BuildMI(CheckBB, dl,
TII.get(AVR::PHI), ShiftReg)
1834 BuildMI(CheckBB, dl,
TII.get(AVR::PHI), ShiftAmtReg)
1839 BuildMI(CheckBB, dl,
TII.get(AVR::PHI), DstReg)
1845 BuildMI(CheckBB, dl,
TII.get(AVR::DECRd), ShiftAmtReg2).
addReg(ShiftAmtReg);
1848 MI.eraseFromParent();
1875 Register ZeroReg =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
1883 if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1888 size_t ShiftRegsOffset = ShiftAmt / 8;
1889 size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1891 Regs.
slice(ShiftRegsOffset, ShiftRegsSize);
1899 Register LowByte =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
1903 if (ShiftAmt % 8 == 6) {
1905 Register NewLowByte =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
1907 LowByte = NewLowByte;
1911 for (
size_t I = 0;
I < Regs.size();
I++) {
1912 int ShiftRegsIdx =
I + 1;
1913 if (ShiftRegsIdx < (
int)ShiftRegs.
size()) {
1914 Regs[
I] = ShiftRegs[ShiftRegsIdx];
1915 }
else if (ShiftRegsIdx == (
int)ShiftRegs.
size()) {
1916 Regs[
I] = std::pair(LowByte, 0);
1918 Regs[
I] = std::pair(ZeroReg, 0);
1926 if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1929 size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1931 Regs.
slice(0, ShiftRegsSize);
1940 Register HighByte =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
1942 if (ArithmeticShift) {
1960 if (ShiftAmt % 8 == 6) {
1963 Register NewExt =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
1971 for (
int I = Regs.size() - 1;
I >= 0;
I--) {
1972 int ShiftRegsIdx =
I - (Regs.size() - ShiftRegs.
size()) - 1;
1973 if (ShiftRegsIdx >= 0) {
1974 Regs[
I] = ShiftRegs[ShiftRegsIdx];
1975 }
else if (ShiftRegsIdx == -1) {
1976 Regs[
I] = std::pair(HighByte, 0);
1978 Regs[
I] = std::pair(ExtByte, 0);
1987 while (ShiftLeft && ShiftAmt >= 8) {
1989 for (
size_t I = 0;
I < Regs.size() - 1;
I++) {
1990 Regs[
I] = Regs[
I + 1];
1994 Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
1997 Regs = Regs.drop_back(1);
2004 if (!ShiftLeft && ShiftAmt >= 8) {
2005 if (ArithmeticShift) {
2007 ShrExtendReg =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
2008 Register Tmp =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
2010 .
addReg(Regs[0].first, {}, Regs[0].second)
2011 .addReg(Regs[0].first, {}, Regs[0].second);
2012 BuildMI(*BB,
MI, dl,
TII.get(AVR::SBCRdRr), ShrExtendReg)
2016 ShrExtendReg = ZeroReg;
2018 for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2020 for (
size_t I = Regs.size() - 1;
I != 0;
I--) {
2021 Regs[
I] = Regs[
I - 1];
2025 Regs[0] = std::pair(ShrExtendReg, 0);
2028 Regs = Regs.drop_front(1);
2033 assert((ShiftAmt < 8) &&
"Unexpect shift amount");
2053 if (!ArithmeticShift && ShiftAmt >= 4) {
2055 for (
size_t I = 0;
I < Regs.size();
I++) {
2056 size_t Idx = ShiftLeft ?
I : Regs.size() -
I - 1;
2057 Register SwapReg =
MRI.createVirtualRegister(&AVR::LD8RegClass);
2059 .
addReg(Regs[Idx].first, {}, Regs[Idx].second);
2061 Register R =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
2067 Register AndReg =
MRI.createVirtualRegister(&AVR::LD8RegClass);
2070 .
addImm(ShiftLeft ? 0xf0 : 0x0f);
2072 Register R =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
2076 size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2077 Regs[PrevIdx] = std::pair(R, 0);
2080 Regs[Idx] = std::pair(AndReg, 0);
2087 while (ShiftLeft && ShiftAmt) {
2089 for (ssize_t
I = Regs.size() - 1;
I >= 0;
I--) {
2090 Register Out =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
2093 if (
I == (ssize_t)Regs.size() - 1) {
2095 .
addReg(In, {}, InSubreg)
2096 .addReg(In, {}, InSubreg);
2099 .
addReg(In, {}, InSubreg)
2100 .addReg(In, {}, InSubreg);
2102 Regs[
I] = std::pair(Out, 0);
2106 while (!ShiftLeft && ShiftAmt) {
2108 for (
size_t I = 0;
I < Regs.size();
I++) {
2109 Register Out =
MRI.createVirtualRegister(&AVR::GPR8RegClass);
2113 unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2118 Regs[
I] = std::pair(Out, 0);
2123 if (ShiftAmt != 0) {
2130AVRTargetLowering::insertWideShift(MachineInstr &
MI,
2131 MachineBasicBlock *BB)
const {
2137 int64_t ShiftAmt =
MI.getOperand(4).getImm();
2139 switch (
MI.getOpcode()) {
2152 std::array<std::pair<Register, int>, 4>
Registers = {
2153 std::pair(
MI.getOperand(3).getReg(), AVR::sub_hi),
2154 std::pair(
MI.getOperand(3).getReg(), AVR::sub_lo),
2155 std::pair(
MI.getOperand(2).getReg(), AVR::sub_hi),
2156 std::pair(
MI.getOperand(2).getReg(), AVR::sub_lo),
2174 (
Opc !=
ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2176 BuildMI(*BB,
MI, dl,
TII.get(AVR::REG_SEQUENCE),
MI.getOperand(0).getReg())
2178 .addImm(AVR::sub_lo)
2180 .addImm(AVR::sub_hi);
2181 BuildMI(*BB,
MI, dl,
TII.get(AVR::REG_SEQUENCE),
MI.getOperand(1).getReg())
2183 .addImm(AVR::sub_lo)
2185 .addImm(AVR::sub_hi);
2188 BuildMI(*BB,
MI, dl,
TII.get(AVR::REG_SEQUENCE),
MI.getOperand(1).getReg())
2190 .addImm(AVR::sub_hi)
2192 .addImm(AVR::sub_lo);
2193 BuildMI(*BB,
MI, dl,
TII.get(AVR::REG_SEQUENCE),
MI.getOperand(0).getReg())
2195 .addImm(AVR::sub_hi)
2197 .addImm(AVR::sub_lo);
2201 MI.eraseFromParent();
2206 if (
I->getOpcode() == AVR::COPY) {
2207 Register SrcReg =
I->getOperand(1).getReg();
2208 return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
2217MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &
MI,
2218 MachineBasicBlock *BB)
const {
2226 BuildMI(*BB,
I,
MI.getDebugLoc(),
TII.get(AVR::EORRdRr), AVR::R1)
2236 const TargetInstrInfo &
TII = *
Subtarget.getInstrInfo();
2239 .
add(
MI.getOperand(0))
2241 MI.eraseFromParent();
2249 MachineRegisterInfo &
MRI = BB->getParent()->getRegInfo();
2250 const TargetInstrInfo &
TII = *
Subtarget.getInstrInfo();
2263 const TargetRegisterClass *RC =
2264 (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2265 unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2266 unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2274 BuildMI(*BB,
I, dl,
TII.get(LoadOpcode),
MI.getOperand(0).getReg())
2275 .
add(
MI.getOperand(1));
2281 .
add(
MI.getOperand(2));
2285 .
add(
MI.getOperand(1))
2294 MI.eraseFromParent();
2301 int Opc =
MI.getOpcode();
2317 return insertShift(
MI,
MBB, STI.hasTinyEncoding());
2321 return insertWideShift(
MI,
MBB);
2324 return insertMul(
MI,
MBB);
2326 return insertCopyZero(
MI,
MBB);
2327 case AVR::AtomicLoadAdd8:
2328 return insertAtomicArithmeticOp(
MI,
MBB, AVR::ADDRdRr, 8);
2329 case AVR::AtomicLoadAdd16:
2330 return insertAtomicArithmeticOp(
MI,
MBB, AVR::ADDWRdRr, 16);
2331 case AVR::AtomicLoadSub8:
2332 return insertAtomicArithmeticOp(
MI,
MBB, AVR::SUBRdRr, 8);
2333 case AVR::AtomicLoadSub16:
2334 return insertAtomicArithmeticOp(
MI,
MBB, AVR::SUBWRdRr, 16);
2335 case AVR::AtomicLoadAnd8:
2336 return insertAtomicArithmeticOp(
MI,
MBB, AVR::ANDRdRr, 8);
2337 case AVR::AtomicLoadAnd16:
2338 return insertAtomicArithmeticOp(
MI,
MBB, AVR::ANDWRdRr, 16);
2339 case AVR::AtomicLoadOr8:
2340 return insertAtomicArithmeticOp(
MI,
MBB, AVR::ORRdRr, 8);
2341 case AVR::AtomicLoadOr16:
2342 return insertAtomicArithmeticOp(
MI,
MBB, AVR::ORWRdRr, 16);
2343 case AVR::AtomicLoadXor8:
2344 return insertAtomicArithmeticOp(
MI,
MBB, AVR::EORRdRr, 8);
2345 case AVR::AtomicLoadXor16:
2346 return insertAtomicArithmeticOp(
MI,
MBB, AVR::EORWRdRr, 16);
2349 assert((
Opc == AVR::Select16 ||
Opc == AVR::Select8) &&
2350 "Unexpected instr type to insert");
2371 if (FallThrough !=
nullptr) {
2387 unsigned CallFrameSize =
TII.getCallFrameSizeAt(
MI);
2401 MBB->addSuccessor(falseMBB);
2402 MBB->addSuccessor(trueMBB);
2410 MI.getOperand(0).getReg())
2416 MI.eraseFromParent();
2426 if (Constraint.
size() == 1) {
2428 switch (Constraint[0]) {
2471 switch (ConstraintCode[0]) {
2482 Value *CallOperandVal =
info.CallOperandVal;
2487 if (!CallOperandVal) {
2492 switch (*constraint) {
2531 if ((
C->getSExtValue() >= -63) && (
C->getSExtValue() <= 0)) {
2538 if (
C->getZExtValue() == 2) {
2545 if (
C->getZExtValue() == 0) {
2559 if (
C->getSExtValue() == -1) {
2566 if ((
C->getZExtValue() == 8) || (
C->getZExtValue() == 16) ||
2567 (
C->getZExtValue() == 24)) {
2574 if (
C->getZExtValue() == 1) {
2581 if ((
C->getSExtValue() >= -6) && (
C->getSExtValue() <= 5)) {
2594std::pair<unsigned, const TargetRegisterClass *>
2598 if (Constraint.
size() == 1) {
2599 switch (Constraint[0]) {
2602 return std::make_pair(0U, &AVR::LD8loRegClass);
2603 else if (VT == MVT::i16)
2604 return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
2607 if (VT == MVT::i8 || VT == MVT::i16)
2608 return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
2612 return std::make_pair(0U, &AVR::LD8RegClass);
2613 else if (VT == MVT::i16)
2614 return std::make_pair(0U, &AVR::DLDREGSRegClass);
2618 return std::make_pair(0U, &AVR::GPR8loRegClass);
2619 else if (VT == MVT::i16)
2620 return std::make_pair(0U, &AVR::DREGSloRegClass);
2623 if (VT == MVT::i8 || VT == MVT::i16)
2624 return std::make_pair(0U, &AVR::PTRREGSRegClass);
2627 return std::make_pair(0U, &AVR::GPRSPRegClass);
2630 return std::make_pair(0U, &AVR::GPR8RegClass);
2631 else if (VT == MVT::i16)
2632 return std::make_pair(0U, &AVR::DREGSRegClass);
2636 return std::make_pair(
unsigned(
Subtarget.getTmpRegister()),
2637 &AVR::GPR8RegClass);
2640 if (VT == MVT::i8 || VT == MVT::i16)
2641 return std::make_pair(0U, &AVR::IWREGSRegClass);
2645 if (VT == MVT::i8 || VT == MVT::i16)
2646 return std::make_pair(
unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
2650 if (VT == MVT::i8 || VT == MVT::i16)
2651 return std::make_pair(
unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
2655 if (VT == MVT::i8 || VT == MVT::i16)
2656 return std::make_pair(
unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
2664 Subtarget.getRegisterInfo(), Constraint, VT);
2669 std::vector<SDValue> &
Ops,
2673 EVT Ty =
Op.getValueType();
2676 if (Constraint.
size() != 1) {
2680 char ConstraintLetter = Constraint[0];
2681 switch (ConstraintLetter) {
2699 int64_t CVal64 =
C->getSExtValue();
2701 switch (ConstraintLetter) {
2708 if (CVal64 < -63 || CVal64 > 0)
2728 if (Ty.getSimpleVT() == MVT::i8) {
2739 if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2749 if (CVal64 < -6 || CVal64 > 5)
2759 if (!FC || !FC->isZero())
2766 if (Result.getNode()) {
2767 Ops.push_back(Result);
2780 .
Case(
"r0", AVR::R0)
2781 .
Case(
"r1", AVR::R1)
2785 .
Case(
"r0", AVR::R1R0)
2786 .
Case(
"sp", AVR::SP)
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
SI Pre allocate WWM Registers
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
Utilities related to the AVR instruction set.
A specific AVR target MCU.
Register getZeroRegister() const
const AVRInstrInfo * getInstrInfo() const override
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
Replace a node with an illegal result type with a new node built out of custom code.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
const AVRSubtarget & Subtarget
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
AVRTargetLowering(const AVRTargetMachine &TM, const AVRSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node can be combined with a load / store to form a post-indexed load / store.
A generic AVR implementation.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
LLVM Basic Block Representation.
CCState - This class holds information needed while lowering arguments and return values.
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
LLVMContext & getContext() const
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void addLoc(const CCValAssign &V)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static auto integer_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor blocks which refer to FromMBB to refer to this.
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
BasicBlockListType::iterator iterator
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
MutableArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDValue getValue(unsigned R) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
std::vector< ArgListEntry > ArgListTy
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM Value Representation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
CondCodes
AVR specific condition codes.
@ COND_SH
Unsigned same or higher.
@ COND_GE
Greater than or equal.
bool isProgramMemoryAccess(MemSDNode const *N)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AVR_BUILTIN
Used for special AVR rtlib functions which have an "optimized" convention to preserve registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, Lo and Hi.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ BasicBlock
Various leaf nodes.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
static void analyzeReturnValues(const SmallVectorImpl< ArgT > &Args, CCState &CCInfo, bool Tiny)
Analyze incoming and outgoing value of returning from a function.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
@ Undef
Value of the register doesn't matter.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static const MCPhysReg RegList16Tiny[]
constexpr bool has_single_bit(T Value) noexcept
static const MCPhysReg RegList8Tiny[]
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F, const DataLayout *TD, const SmallVectorImpl< ArgT > &Args, SmallVectorImpl< CCValAssign > &ArgLocs, CCState &CCInfo, bool Tiny)
Analyze incoming and outgoing function arguments.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
static const MCPhysReg RegList16AVR[]
@ Sub
Subtraction of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl< ArgT > &Args)
Count the total number of bytes needed to pass or return these arguments.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC)
IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static bool isCopyMulResult(MachineBasicBlock::iterator const &I)
static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB, MutableArrayRef< std::pair< Register, int > > Regs, ISD::NodeType Opc, int64_t ShiftAmt)
static const MCPhysReg RegList8AVR[]
Registers for calling conventions, ordered in reverse as required by ABI.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isVector() const
Return true if this is a vector value type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.