  if (!Subtarget.supportsMultiplication()) {

  EVT VT = Op.getValueType();
         "Expected power-of-2 shift amount");
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
  uint64_t ShiftAmount = N->getConstantOperandVal(1);
  if (ShiftAmount == 16) {
    switch (Op.getOpcode()) {
    switch (Op.getOpcode()) {
    SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    SDValue Amt = N->getOperand(1);
    EVT AmtVT = Amt.getValueType();
    SDValue Amt = N->getOperand(1);
    EVT AmtVT = Amt.getValueType();
  uint64_t ShiftAmount = N->getConstantOperandVal(1);
  SDValue Victim = N->getOperand(0);
  switch (Op.getOpcode()) {
  if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
  } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
  } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
  } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
  } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
  } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
  } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
  } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
  } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
  } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
  switch (ShiftAmount) {
  if (4 <= ShiftAmount && ShiftAmount < 8)
    switch (Op.getOpcode()) {
  else if (8 <= ShiftAmount && ShiftAmount < 12)
    switch (Op.getOpcode()) {
  else if (12 <= ShiftAmount)
    switch (Op.getOpcode()) {
  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
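// LowerDivRem: lowers ISD::SDIVREM/UDIVREM by selecting the matching RTLIB
// divmod libcall for i8/i16/i32 and emitting it through LowerCallTo; the
// combined quotient/remainder result comes back from that single call.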
SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
         "Invalid opcode for Div/Rem lowering");
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  switch (VT.getSimpleVT().SimpleTy) {
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  TargetLowering::CallLoweringInfo CLI(DAG);
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
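// LowerGlobalAddress / LowerBlockAddress: rebuild the address as a target
// global/block address node (AVRISD::WRAPPER is the wrapper used for these
// target addresses).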
SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
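// getAVRCmp: emits the glue-producing compare. i8 operands use a single
// AVRISD::CMP; i16 operands are compared bytewise, chaining AVRISD::CMPC
// (compare with carry) for the high byte.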
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, SDLoc DL) const {
  assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
         "LHS and RHS have different types");
  assert(((LHS.getSimpleValueType() == MVT::i16) ||
          (LHS.getSimpleValueType() == MVT::i8)) &&
         "invalid comparison type");
  if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
                          DAG.getIntPtrConstant(0, DL));
                          DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = (Imm & 0xff) == 0
                        DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = (Imm & 0xff00) == 0
                        DAG.getIntPtrConstant(1, DL));
    Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
  } else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
    SDValue LHSlo = (Imm & 0xff) == 0
                        DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = (Imm & 0xff00) == 0
                        DAG.getIntPtrConstant(1, DL));
                          DAG.getIntPtrConstant(0, DL));
                          DAG.getIntPtrConstant(1, DL));
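// The overload below also computes the AVR condition code (AVRcc). It first
// massages the ISD::CondCode and any constant RHS where a cheaper compare
// exists (UseTest switches to an AVRISD::TST against zero), then splits
// i32/i64 operands into 16-bit pieces and chains getAVRCmp over them.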
                                     SDValue &AVRcc, SelectionDAG &DAG,
  EVT VT = LHS.getValueType();
  bool UseTest = false;
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
        LHS = DAG.getConstant(0, DL, VT);
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
        LHS = DAG.getConstant(0, DL, VT);
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
  if (VT == MVT::i32) {
                                 DAG.getIntPtrConstant(0, DL));
                                 DAG.getIntPtrConstant(1, DL));
                                 DAG.getIntPtrConstant(0, DL));
                                 DAG.getIntPtrConstant(1, DL));
                        DAG.getIntPtrConstant(1, DL));
      Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
  } else if (VT == MVT::i64) {
                               DAG.getIntPtrConstant(0, DL));
                               DAG.getIntPtrConstant(1, DL));
                                 DAG.getIntPtrConstant(0, DL));
                                 DAG.getIntPtrConstant(1, DL));
                                 DAG.getIntPtrConstant(0, DL));
                                 DAG.getIntPtrConstant(1, DL));
                               DAG.getIntPtrConstant(0, DL));
                               DAG.getIntPtrConstant(1, DL));
                                 DAG.getIntPtrConstant(0, DL));
                                 DAG.getIntPtrConstant(1, DL));
                                 DAG.getIntPtrConstant(0, DL));
                                 DAG.getIntPtrConstant(1, DL));
                        DAG.getIntPtrConstant(1, DL));
      Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
  } else if (VT == MVT::i8 || VT == MVT::i16) {
                        LHS, DAG.getIntPtrConstant(1, DL)));
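// LowerBR_CC, LowerSELECT_CC and LowerSETCC all route the comparison through
// getAVRCmp and then branch or select on the glued result (AVRISD::BRCOND /
// SELECT_CC).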
SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,

SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDValue FI =
      DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
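// LowerINLINEASM: copies the operands of the INLINEASM node, appends an extra
// register operand for the zero register, and rebuilds the node, replacing all
// uses of the original.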
SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Ops;
  SDNode *N = Op.getNode();
  for (unsigned I = 0; I < N->getNumOperands(); I++) {
    SDValue Operand = N->getOperand(I);
    if (Operand.getValueType() == MVT::Glue) {
    Ops.push_back(Operand);
  Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
  Ops.push_back(ZeroReg);
  SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
  DAG.ReplaceAllUsesOfValueWith(Op, New);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
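// LowerOperation dispatches the custom-lowered opcodes above;
// ReplaceNodeResults handles nodes whose result type is illegal.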
  switch (Op.getOpcode()) {
    return LowerShifts(Op, DAG);
    return LowerGlobalAddress(Op, DAG);
    return LowerBlockAddress(Op, DAG);
    return LowerBR_CC(Op, DAG);
    return LowerSELECT_CC(Op, DAG);
    return LowerSETCC(Op, DAG);
    return LowerVASTART(Op, DAG);
    return LowerDivRem(Op, DAG);
    return LowerINLINEASM(Op, DAG);

  switch (N->getOpcode()) {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
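// getPreIndexedAddressParts / getPostIndexedAddressParts: match loads and
// stores whose pointer is adjusted by the access size (-1/-2 before, +1/+2
// after the access) so they can use AVR's pre-decrement and post-increment
// addressing modes.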
  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
  if (VT != MVT::i8 && VT != MVT::i16) {
  int RHSC = RHS->getSExtValue();
  if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
  Base = Op->getOperand(0);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
  if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
  if (VT != MVT::i8 && VT != MVT::i16) {
  int RHSC = RHS->getSExtValue();
  if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
  Base = Op->getOperand(0);
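// Generated calling-convention definitions, followed by the registers used to
// pass arguments. The lists are in reverse order, as required by the AVR ABI,
// with separate (shorter) lists for AVRTiny.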
#include "AVRGenCallingConv.inc"

    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9,  AVR::R8};
    AVR::R22, AVR::R21, AVR::R20};
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
    AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
    AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
    AVR::R11R10, AVR::R10R9,  AVR::R9R8};
    AVR::R24R23, AVR::R23R22, AVR::R22R21, AVR::R21R20};
              "8-bit and 16-bit register arrays must be of equal length");
              "8-bit and 16-bit register arrays must be of equal length");

template <typename ArgT>
  unsigned NumArgs = Args.size();
  int RegLastIdx = -1;
  bool UseStack = false;
  for (unsigned i = 0; i != NumArgs;) {
    MVT VT = Args[i].VT;
    unsigned ArgIndex = Args[i].OrigArgIndex;
    for (; j != NumArgs; ++j) {
      if (Args[j].OrigArgIndex != ArgIndex)
      TotalBytes += Args[j].VT.getStoreSize();
    TotalBytes = alignTo(TotalBytes, 2);
    if (TotalBytes == 0)
    unsigned RegIdx = RegLastIdx + TotalBytes;
    RegLastIdx = RegIdx;
    if (RegIdx >= RegList8.size()) {
    for (; i != j; ++i) {
      MVT VT = Args[i].VT;
      if (VT == MVT::i8) {
      } else if (VT == MVT::i16) {
               "calling convention can only manage i8 and i16 types");
      assert(Reg && "register not available in calling convention");

template <typename ArgT>
  unsigned TotalBytes = 0;
  for (const ArgT &Arg : Args) {
    TotalBytes += Arg.VT.getStoreSize();

template <typename ArgT>
  unsigned NumArgs = Args.size();
    assert(TotalBytes <= 4 &&
           "return values greater than 4 bytes cannot be lowered on AVRTiny");
    assert(TotalBytes <= 8 &&
           "return values greater than 8 bytes cannot be lowered on AVR");
  if (TotalBytes > 4) {
  TotalBytes = alignTo(TotalBytes, 2);
  int RegIdx = TotalBytes - 1;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT VT = Args[i].VT;
    if (VT == MVT::i8) {
    } else if (VT == MVT::i16) {
    assert(Reg && "register not available in calling convention");
SDValue AVRTargetLowering::LowerFormalArguments(
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
    CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
  for (CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      switch (VA.getLocInfo()) {
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      InVals.push_back(ArgValue);
      EVT LocVT = VA.getLocVT();
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
  unsigned StackSize = CCInfo.getStackSize();
  AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
    AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
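// LowerCall: analyzes the outgoing arguments, copies register arguments into
// their assigned physical registers, stores stack arguments relative to SP,
// and wraps the call node in a CALLSEQ_START/CALLSEQ_END pair.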
SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  bool isVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    if (isa<Function>(GV))
      F = cast<Function>(GV);
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
    CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
  unsigned NumBytes = CCInfo.getStackSize();
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];
    switch (VA.getLocInfo()) {
    if (VA.isMemLoc()) {
      HasStackArgs = true;
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  SmallVector<SDValue, 8> MemOpChains;
  for (; AI != AE; AI++) {
    CCValAssign &VA = ArgLocs[AI];
    SDValue Arg = OutVals[AI];
    SDValue PtrOff = DAG.getNode(
        DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
        DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff,
  if (!MemOpChains.empty())
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);
    InGlue = Chain.getValue(1);
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));
  if (InGlue.getNode()) {
    Ops.push_back(InGlue);
  InGlue = Chain.getValue(1);
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);
  InGlue = Chain.getValue(1);
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,
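// LowerCallResult / CanLowerReturn / LowerReturn use the RetCC_AVR_BUILTIN
// calling convention to move return values in and out of registers; naked
// functions and interrupt/signal handlers get special return handling.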
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
    CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
    InGlue = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));

bool AVRTargetLowering::CanLowerReturn(
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);

AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
  MachineFunction &MF = DAG.getMachineFunction();
    CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
  SmallVector<SDValue, 4> RetOps(1, Chain);
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  if (!AFI->isInterruptOrSignalHandler()) {
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
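// insertShift: expands the variable-amount shift pseudos. It builds three new
// blocks (loop body, check, remainder), shifts one bit per loop iteration, and
// decrements the shift-amount register with DECRd until the count is used up.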
MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB,
  const TargetRegisterClass *RC;
  bool HasRepeatedOperand = false;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  switch (MI.getOpcode()) {
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true;
    RC = &AVR::DREGSRegClass;
    RC = &AVR::GPR8RegClass;
    RC = &AVR::DREGSRegClass;
    RC = &AVR::GPR8RegClass;
    RC = &AVR::DREGSRegClass;
    Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
    RC = &AVR::GPR8RegClass;
    RC = &AVR::DREGSRegClass;
    RC = &AVR::GPR8RegClass;
    RC = &AVR::DREGSRegClass;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(I, LoopBB);
  F->insert(I, CheckBB);
  F->insert(I, RemBB);
  RemBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(CheckBB);
  LoopBB->addSuccessor(CheckBB);
  CheckBB->addSuccessor(LoopBB);
  CheckBB->addSuccessor(RemBB);
  Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftReg = RI.createVirtualRegister(RC);
  Register ShiftReg2 = RI.createVirtualRegister(RC);
  Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
  auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  if (HasRepeatedOperand)
    ShiftMI.addReg(ShiftReg);
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
  BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
  MI.eraseFromParent();
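// insertMultibyteShift: shifts a value spread across several 8-bit registers
// by a constant amount. Whole bytes are handled by moving registers (and
// sign/zero-extending the vacated end); a remaining shift of four or more bits
// uses SWAP plus ANDI on each byte; whatever is left is done one bit at a
// time.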
  const bool ShiftLeft = Opc == ISD::SHL;
  const bool ArithmeticShift = Opc == ISD::SRA;
  Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
  if (ShiftLeft && (ShiftAmt % 8) >= 6) {
    size_t ShiftRegsOffset = ShiftAmt / 8;
    size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
        Regs.slice(ShiftRegsOffset, ShiftRegsSize);
    Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    if (ShiftAmt % 8 == 6) {
      Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      LowByte = NewLowByte;
    for (size_t I = 0; I < Regs.size(); I++) {
      int ShiftRegsIdx = I + 1;
      if (ShiftRegsIdx < (int)ShiftRegs.size()) {
        Regs[I] = ShiftRegs[ShiftRegsIdx];
      } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
        Regs[I] = std::pair(LowByte, 0);
        Regs[I] = std::pair(ZeroReg, 0);
  if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
    size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
        Regs.slice(0, ShiftRegsSize);
    Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    if (ArithmeticShift) {
    if (ShiftAmt % 8 == 6) {
      Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
    for (int I = Regs.size() - 1; I >= 0; I--) {
      int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
      if (ShiftRegsIdx >= 0) {
        Regs[I] = ShiftRegs[ShiftRegsIdx];
      } else if (ShiftRegsIdx == -1) {
        Regs[I] = std::pair(HighByte, 0);
        Regs[I] = std::pair(ExtByte, 0);
  while (ShiftLeft && ShiftAmt >= 8) {
    for (size_t I = 0; I < Regs.size() - 1; I++) {
      Regs[I] = Regs[I + 1];
    Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
    Regs = Regs.drop_back(1);
  if (!ShiftLeft && ShiftAmt >= 8) {
    if (ArithmeticShift) {
      ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
          .addReg(Regs[0].first, 0, Regs[0].second)
          .addReg(Regs[0].first, 0, Regs[0].second);
      BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
      ShrExtendReg = ZeroReg;
    for (; ShiftAmt >= 8; ShiftAmt -= 8) {
      for (size_t I = Regs.size() - 1; I != 0; I--) {
        Regs[I] = Regs[I - 1];
      Regs[0] = std::pair(ShrExtendReg, 0);
      Regs = Regs.drop_front(1);
  assert((ShiftAmt < 8) && "Unexpect shift amount");
  if (!ArithmeticShift && ShiftAmt >= 4) {
    for (size_t I = 0; I < Regs.size(); I++) {
      size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
      Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
        Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
          .addImm(ShiftLeft ? 0xf0 : 0x0f);
        Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
        size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
        Regs[PrevIdx] = std::pair(R, 0);
      Regs[Idx] = std::pair(AndReg, 0);
  while (ShiftLeft && ShiftAmt) {
    for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
      Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      if (I == (ssize_t)Regs.size() - 1) {
            .addReg(In, 0, InSubreg);
            .addReg(In, 0, InSubreg);
      Regs[I] = std::pair(Out, 0);
  while (!ShiftLeft && ShiftAmt) {
    for (size_t I = 0; I < Regs.size(); I++) {
      Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
      unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
      Regs[I] = std::pair(Out, 0);
  if (ShiftAmt != 0) {
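// insertWideShift: lowers the 32-bit shift pseudos by handing the four byte
// halves of the two i16 operands to insertMultibyteShift and reassembling the
// result with REG_SEQUENCE.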
AVRTargetLowering::insertWideShift(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {
  const DebugLoc &dl = MI.getDebugLoc();
  int64_t ShiftAmt = MI.getOperand(4).getImm();
  switch (MI.getOpcode()) {
  std::array<std::pair<Register, int>, 4> Registers = {
      std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
      std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
      std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
      std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
      (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
    BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
  MI.eraseFromParent();
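// isCopyMulResult spots copies out of R0/R1; insertMul expands the MUL
// pseudos, copies the product out of R1:R0, and then clears R1 again with
// EORRdRr, since the rest of the generated code expects R1 to hold zero.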
  if (I->getOpcode() == AVR::COPY) {
    Register SrcReg = I->getOperand(1).getReg();
    return (SrcReg == AVR::R0 || SrcReg == AVR::R1);

MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)

AVRTargetLowering::insertCopyZero(MachineInstr &MI,
                                  MachineBasicBlock *BB) const {
      .add(MI.getOperand(0))
  MI.eraseFromParent();

MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
  unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
  unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
  BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
      .add(MI.getOperand(1));
      .add(MI.getOperand(2));
      .add(MI.getOperand(1))
  MI.eraseFromParent();

  int Opc = MI.getOpcode();
    return insertShift(MI, MBB, STI.hasTinyEncoding());
    return insertWideShift(MI, MBB);
    return insertMul(MI, MBB);
    return insertCopyZero(MI, MBB);
  case AVR::AtomicLoadAdd8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
  case AVR::AtomicLoadAdd16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
  case AVR::AtomicLoadSub8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
  case AVR::AtomicLoadSub16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
  case AVR::AtomicLoadAnd8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
  case AVR::AtomicLoadAnd16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
  case AVR::AtomicLoadOr8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
  case AVR::AtomicLoadOr16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
  case AVR::AtomicLoadXor8:
    return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
  case AVR::AtomicLoadXor16:
    return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");
  if (FallThrough != nullptr) {
  unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);
          MI.getOperand(0).getReg())
  MI.eraseFromParent();
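// Inline-assembly constraint support: classify single-letter constraints,
// weigh how well a call operand satisfies one (checking the immediate ranges
// the AVR constraint letters accept), map register constraints to AVR register
// classes, and validate/convert immediate operands.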
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  switch (ConstraintCode[0]) {

  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal) {
  switch (*constraint) {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<6>(C->getZExtValue())) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 2) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 0) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<8>(C->getZExtValue())) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getSExtValue() == -1) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
          (C->getZExtValue() == 24)) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 1) {
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {

std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
        return std::make_pair(0U, &AVR::LD8loRegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
        return std::make_pair(0U, &AVR::LD8RegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DLDREGSRegClass);
        return std::make_pair(0U, &AVR::GPR8loRegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DREGSloRegClass);
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(0U, &AVR::PTRREGSRegClass);
      return std::make_pair(0U, &AVR::GPRSPRegClass);
        return std::make_pair(0U, &AVR::GPR8RegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DREGSRegClass);
                            &AVR::GPR8RegClass);
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(0U, &AVR::IWREGSRegClass);
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);

    std::vector<SDValue> &Ops,
  EVT Ty = Op.getValueType();
  if (Constraint.size() != 1) {
  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
    int64_t CVal64 = C->getSExtValue();
    switch (ConstraintLetter) {
      if (!isUInt<6>(CUVal64))
      if (CVal64 < -63 || CVal64 > 0)
      if (!isUInt<8>(CUVal64))
      if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
      if (CVal64 < -6 || CVal64 > 5)
      if (!FC || !FC->isZero())
  if (Result.getNode()) {
    Ops.push_back(Result);
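// getRegisterByName: a StringSwitch mapping textual register names ("r0",
// "r1", ..., "sp" and the 16-bit pair spellings) to AVR physical registers.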
      .Case("r0", AVR::R0)
      .Case("r1", AVR::R1)
      .Case("r0", AVR::R1R0)
      .Case("sp", AVR::SP)