#define DEBUG_TYPE "mips-fastisel"

class MipsFastISel final : public FastISel {

  using BaseKind = enum { RegBase, FrameIndexBase };

  BaseKind Kind = RegBase;

  void setKind(BaseKind K) { Kind = K; }
  BaseKind getKind() const { return Kind; }
  bool isRegBase() const { return Kind == RegBase; }
  bool isFIBase() const { return Kind == FrameIndexBase; }

  void setReg(unsigned Reg) {
    assert(isRegBase() && "Invalid base register access!");

  unsigned getReg() const {
    assert(isRegBase() && "Invalid base register access!");

  void setFI(unsigned FI) {
    assert(isFIBase() && "Invalid base frame index access!");

  unsigned getFI() const {
    assert(isFIBase() && "Invalid base frame index access!");

  void setOffset(int64_t Offset_) { Offset = Offset_; }
  int64_t getOffset() const { return Offset; }
  bool UnsupportedFPMode;

  bool selectFPToInt(const Instruction *I, bool IsSigned);

  bool selectDivRem(const Instruction *I, unsigned ISDOpcode);

  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);

  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
                  bool IsZExt);
  bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
  bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
  bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg);
  bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg);

  unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);

  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
                         const Value *RHS);

  unsigned materializeExternalCallSym(MCSymbol *Syn);

  MachineInstrBuilder emitInst(unsigned Opc) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
  }

  MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                   DstReg);
  }

  MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
                                    unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
  }

  MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
                                   unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
  }

  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, unsigned Op1);

  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

#include "MipsGenFastISel.inc"

#include "MipsGenCallingConv.inc"
unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
                                     const Value *LHS, const Value *RHS) {

  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))

  Register LHSReg = getRegForValue(LHS);

  if (const auto *C = dyn_cast<ConstantInt>(RHS))
    RHSReg = materializeInt(C, MVT::i32);
  else
    RHSReg = getRegForValue(RHS);

  Register ResultReg = createResultReg(&Mips::GPR32RegClass);

  emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
         "Alloca should always return a pointer.");

      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    Register ResultReg = createResultReg(&Mips::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu),
unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)

unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
                                           const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);

  if (isInt<16>(Imm)) {
    unsigned Opc = Mips::ADDiu;
    emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
  } else if (isUInt<16>(Imm)) {
    emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);

    unsigned Lo = Imm & 0xFFFF;
    unsigned Hi = (Imm >> 16) & 0xFFFF;

    Register TmpReg = createResultReg(RC);
    emitInst(Mips::LUi, TmpReg).addImm(Hi);
    emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);

    emitInst(Mips::LUi, ResultReg).addImm(Hi);
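// Illustrative sketch (not from the original file; helper name is ours): the
// value produced by the three paths of materialize32BitInt. Small signed
// constants use a single ADDiu from $zero, small unsigned constants a single
// ORi, and everything else an LUi of the high halfword followed by an ORi of
// the low halfword.
#include <cstdint>

static uint32_t materialized32BitValue(int64_t Imm) {
  uint32_t Lo = Imm & 0xFFFF;          // ORi immediate
  uint32_t Hi = (Imm >> 16) & 0xFFFF;  // LUi immediate
  if (Imm >= -32768 && Imm < 32768)    // isInt<16>: ADDiu $zero, Imm
    return (uint32_t)(int32_t)Imm;
  if (Imm >= 0 && Imm < 65536)         // isUInt<16>: ORi $zero, Imm
    return (uint32_t)Imm;
  return (Hi << 16) | Lo;              // LUi then ORi
}
// Example: materialized32BitValue(0x12345678) == 0x12345678.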
unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  if (UnsupportedFPMode)

  if (VT == MVT::f32) {
    Register DestReg = createResultReg(RC);
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
  } else if (VT == MVT::f64) {
    Register DestReg = createResultReg(RC);
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
    unsigned TempReg2 =
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  Register DestReg = createResultReg(RC);

  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg(*MF))

  Register TempReg = createResultReg(RC);
  emitInst(Mips::ADDiu, TempReg)

unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
  Register DestReg = createResultReg(RC);
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg(*MF))
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return materializeInt(C, VT);
bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {

    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
      Opcode = I->getOpcode();

  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();

  case Instruction::BitCast:
    return computeAddress(U->getOperand(0), Addr);

  case Instruction::GetElementPtr: {
    int64_t TmpOffset = Addr.getOffset();

        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();

        TmpOffset += CI->getSExtValue() * S;

        if (canFoldAddIntoGEP(U, Op)) {

              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));

          Op = cast<AddOperator>(Op)->getOperand(0);

          goto unsupported_gep;

    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))

  case Instruction::Alloca: {

        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);

  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;
bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

  if (const auto *I = dyn_cast<Instruction>(V)) {

    if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
      Opcode = I->getOpcode();

  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();

  case Instruction::BitCast:
    return computeCallAddress(U->getOperand(0), Addr);

  case Instruction::IntToPtr:
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

  case Instruction::PtrToInt:
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    Addr.setGlobalValue(GV);

  if (!Addr.getGlobalValue()) {
    Addr.setReg(getRegForValue(V));
    return Addr.getReg() != 0;
bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  if (evt == MVT::Other || !evt.isSimple())

  return TLI.isTypeLegal(VT);

bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))

  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)

bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))

  if (VT == MVT::i8 || VT == MVT::i16)
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {

  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);

  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);

    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);

    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);

    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);

    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);

    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);

    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);

    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);

    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);

    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);

    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);

    if (UnsupportedFPMode)

    bool IsFloat = Left->getType()->isFloatTy();
    bool IsDouble = Left->getType()->isDoubleTy();
    if (!IsFloat && !IsDouble)

    unsigned Opc, CondMovOpc;

      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVT_I;

      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVF_I;

      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
      CondMovOpc = Mips::MOVT_I;

      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
      CondMovOpc = Mips::MOVT_I;

      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
      CondMovOpc = Mips::MOVF_I;

      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
      CondMovOpc = Mips::MOVF_I;

    Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
    Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);

    emitInst(CondMovOpc, ResultReg)
        .addReg(RegWithZero);
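// Illustrative sketch (not from the original file; helper names are ours):
// the integer compares above are built from MIPS's only "set" instructions,
// SLT/SLTu. Equality is XOR followed by SLTiu 1, inequality is XOR followed
// by SLTu against $zero, and the >= / <= forms take the complement of a
// (possibly operand-swapped) < via XORi 1. The same identities in plain C++:
#include <cstdint>

static uint32_t sltu(uint32_t A, uint32_t B) { return A < B ? 1u : 0u; }

static uint32_t cmpEq(uint32_t L, uint32_t R)  { return (L ^ R) < 1u ? 1u : 0u; } // XOR; SLTiu 1
static uint32_t cmpNe(uint32_t L, uint32_t R)  { return sltu(0, L ^ R); }         // XOR; SLTu $zero
static uint32_t cmpUgt(uint32_t L, uint32_t R) { return sltu(R, L); }             // SLTu, operands swapped
static uint32_t cmpUge(uint32_t L, uint32_t R) { return sltu(L, R) ^ 1u; }        // SLTu; XORi 1
static uint32_t cmpUle(uint32_t L, uint32_t R) { return sltu(R, L) ^ 1u; }        // swapped SLTu; XORi 1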
bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr) {

    ResultReg = createResultReg(&Mips::GPR32RegClass);

    ResultReg = createResultReg(&Mips::GPR32RegClass);

    ResultReg = createResultReg(&Mips::GPR32RegClass);

    if (UnsupportedFPMode)

    ResultReg = createResultReg(&Mips::FGR32RegClass);

    if (UnsupportedFPMode)

    ResultReg = createResultReg(&Mips::AFGR64RegClass);

  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());

  if (Addr.isFIBase()) {
    unsigned FI = Addr.getFI();

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr) {

    if (UnsupportedFPMode)

    if (UnsupportedFPMode)

  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());

  if (Addr.isFIBase()) {
    unsigned FI = Addr.getFI();

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
bool MipsFastISel::selectLogicalOp(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT))

  switch (I->getOpcode()) {

  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));

  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));

  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));

  updateValueMap(I, ResultReg);
  if (!isLoadTypeLegal(LI->getType(), VT))

      !Subtarget->systemSupportsUnalignedAccess())

  updateValueMap(LI, ResultReg);

  Value *Op0 = SI->getOperand(0);

  if (!isLoadTypeLegal(SI->getOperand(0)->getType(), VT))

      !Subtarget->systemSupportsUnalignedAccess())

  SrcReg = getRegForValue(Op0);

  if (!computeAddress(SI->getOperand(1), Addr))
  unsigned ZExtCondReg = 0;

    ZExtCondReg = createResultReg(&Mips::GPR32RegClass);

  if (ZExtCondReg == 0) {

    ZExtCondReg = emitIntExt(MVT::i1, CondReg, MVT::i32, true);
    if (ZExtCondReg == 0)

  BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ))

  const CmpInst *CI = cast<CmpInst>(I);
  Register ResultReg = createResultReg(&Mips::GPR32RegClass);

  updateValueMap(I, ResultReg);
  if (UnsupportedFPMode)

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f32 || DestVT != MVT::f64)

  Register SrcReg = getRegForValue(Src);

  Register DestReg = createResultReg(&Mips::AFGR64RegClass);
  emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);
  assert(isa<SelectInst>(I) && "Expected a select instruction.");

  if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
    LLVM_DEBUG(
        dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");

  unsigned CondMovOpc;

    CondMovOpc = Mips::MOVN_I_I;
    RC = &Mips::GPR32RegClass;
  } else if (VT == MVT::f32) {
    CondMovOpc = Mips::MOVN_I_S;
    RC = &Mips::FGR32RegClass;
  } else if (VT == MVT::f64) {
    CondMovOpc = Mips::MOVN_I_D32;
    RC = &Mips::AFGR64RegClass;

  Register Src1Reg = getRegForValue(SI->getTrueValue());
  Register Src2Reg = getRegForValue(SI->getFalseValue());

  if (!Src1Reg || !Src2Reg || !CondReg)

  Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);

  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))

  Register ResultReg = createResultReg(RC);
  Register TempReg = createResultReg(RC);

  if (!ResultReg || !TempReg)

  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
  emitInst(CondMovOpc, ResultReg)
      .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
  updateValueMap(I, ResultReg);
bool MipsFastISel::selectFPTrunc(const Instruction *I) {
  if (UnsupportedFPMode)

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f64 || DestVT != MVT::f32)

  Register SrcReg = getRegForValue(Src);

  Register DestReg = createResultReg(&Mips::FGR32RegClass);

  emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);
bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
  if (UnsupportedFPMode)

  Type *DstTy = I->getType();
  if (!isTypeLegal(DstTy, DstVT))

  if (DstVT != MVT::i32)

  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();
  if (!isTypeLegal(SrcTy, SrcVT))

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)

  Register SrcReg = getRegForValue(Src);

  Register DestReg = createResultReg(&Mips::GPR32RegClass);
  Register TempReg = createResultReg(&Mips::FGR32RegClass);
  unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;

  emitInst(Opc, TempReg).addReg(SrcReg);
  emitInst(Mips::MFC1, DestReg).addReg(TempReg);

  updateValueMap(I, DestReg);
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));

  NumBytes = CCInfo.getStackSize();

  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

      if (ArgVT == MVT::f32) {

      } else if (ArgVT == MVT::f64) {
        if (Subtarget->isFP64bit())

    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {

        } else if (ArgVT == MVT::f64) {
          if (Subtarget->isFP64bit())

    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
         (ArgVT == MVT::i8)) &&

    Register ArgReg = getRegForValue(ArgVal);

      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, false);

      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, true);

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

      if (isa<UndefValue>(ArgVal))

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                              unsigned NumBytes) {
  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
  if (RetVT != MVT::isVoid) {

    MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);

    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
                             CLI.Symbol ? CLI.Symbol->getName().data()

    if (RVLocs.size() != 1)

    MVT CopyVT = RVLocs[0].getValVT();

    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)

    Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
bool MipsFastISel::fastLowerArguments() {

  if (!FuncInfo.CanLowerReturn) {

  if (F->isVarArg()) {

  LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");

  std::array<MCPhysReg, 4> GPR32ArgRegs = {{Mips::A0, Mips::A1, Mips::A2,
                                            Mips::A3}};
  std::array<MCPhysReg, 2> FGR32ArgRegs = {{Mips::F12, Mips::F14}};
  std::array<MCPhysReg, 2> AFGR64ArgRegs = {{Mips::D6, Mips::D7}};
  auto NextGPR32 = GPR32ArgRegs.begin();
  auto NextFGR32 = FGR32ArgRegs.begin();
  auto NextAFGR64 = AFGR64ArgRegs.begin();

  struct AllocatedReg {

  for (const auto &FormalArg : F->args()) {
    if (FormalArg.hasAttribute(Attribute::InReg) ||
        FormalArg.hasAttribute(Attribute::StructRet) ||
        FormalArg.hasAttribute(Attribute::ByVal)) {

    Type *ArgTy = FormalArg.getType();

    EVT ArgVT = TLI.getValueType(DL, ArgTy);

      if (!FormalArg.hasAttribute(Attribute::SExt) &&
          !FormalArg.hasAttribute(Attribute::ZExt)) {

        LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");

      if (NextGPR32 == GPR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");

      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();

      if (FormalArg.hasAttribute(Attribute::ZExt)) {

        LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");

      if (NextGPR32 == GPR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");

      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();

      if (UnsupportedFPMode) {

      if (NextFGR32 == FGR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");

      Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);

      if (NextGPR32 != GPR32ArgRegs.end())

      if (NextAFGR64 != AFGR64ArgRegs.end())

      if (UnsupportedFPMode) {

      if (NextAFGR64 == AFGR64ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");

      Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);

      if (NextGPR32 != GPR32ArgRegs.end())

      if (NextGPR32 != GPR32ArgRegs.end())

      if (NextFGR32 != FGR32ArgRegs.end())

  for (const auto &FormalArg : F->args()) {
    unsigned ArgNo = FormalArg.getArgNo();
    unsigned SrcReg = Allocation[ArgNo].Reg;
    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);

    Register ResultReg = createResultReg(Allocation[ArgNo].RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)

    updateValueMap(&FormalArg, ResultReg);

  unsigned IncomingArgSizeInBytes = 0;

  IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
                                    IncomingArgSizeInBytes);
bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeSupported(CLI.RetTy, RetVT))

  for (auto Flag : CLI.OutFlags)

  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {

    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))

  if (!computeCallAddress(Callee, Addr))

  if (!processCallArgs(CLI, OutVTs, NumBytes))

  if (!Addr.getGlobalValue())

  unsigned DestAddress;

    DestAddress = materializeExternalCallSym(Symbol);

    DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR),
          Mips::RA).addReg(Mips::T9);

  for (auto Reg : CLI.OutRegs)

    MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(

  return finishCall(CLI, RetVT, NumBytes);
  switch (II->getIntrinsicID()) {

  case Intrinsic::bswap: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeSupported(RetTy, VT))

    Register SrcReg = getRegForValue(II->getOperand(0));

    Register DestReg = createResultReg(&Mips::GPR32RegClass);

    if (VT == MVT::i16) {
      if (Subtarget->hasMips32r2()) {
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
        updateValueMap(II, DestReg);

      unsigned TempReg[3];
      for (unsigned &R : TempReg) {
        R = createResultReg(&Mips::GPR32RegClass);

      emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
      emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
      emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[1]).addImm(0xFF);
      emitInst(Mips::OR, DestReg).addReg(TempReg[0]).addReg(TempReg[2]);
      updateValueMap(II, DestReg);

    } else if (VT == MVT::i32) {
      if (Subtarget->hasMips32r2()) {
        Register TempReg = createResultReg(&Mips::GPR32RegClass);
        emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
        emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
        updateValueMap(II, DestReg);

      unsigned TempReg[8];
      for (unsigned &R : TempReg) {
        R = createResultReg(&Mips::GPR32RegClass);

      emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
      emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
      emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
      emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);

      emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
      emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);

      emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
      emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
      emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
      updateValueMap(II, DestReg);
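// Illustrative sketch (not from the original file; helper names are ours):
// the shift/mask sequences above implement bswap when WSBH/ROTR (MIPS32r2)
// are unavailable. The same arithmetic in plain C++, so the masks and shift
// amounts can be checked in isolation:
#include <cstdint>

static uint32_t bswap16Expansion(uint32_t Src) {
  // SLL 8; SRL 8; ANDi 0xFF; OR. Only the low 16 bits are meaningful.
  return (Src << 8) | ((Src >> 8) & 0xFF);
}

static uint32_t bswap32Expansion(uint32_t Src) {
  // SRL/SLL/ANDi/OR sequence from the i32 path above.
  return (Src << 24) | ((Src & 0xFF00) << 8) |
         ((Src >> 8) & 0xFF00) | (Src >> 24);
}
// Example: bswap32Expansion(0x12345678) == 0x78563412.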
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);

    if (MTI->isVolatile())

    if (!MTI->getLength()->getType()->isIntegerTy(32))

    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    return lowerCallTo(II, IntrMemName, II->arg_size() - 1);

  case Intrinsic::memset: {

    return lowerCallTo(II, "memset", II->arg_size() - 1);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)

  if (Ret->getNumOperands() > 0) {

    CCInfo.AnalyzeReturn(Outs, RetCC);

    if (ValLocs.size() != 1)

    const Value *RV = Ret->getOperand(0);

    if (!MRI.getRegClass(SrcReg)->contains(DestReg))

    if (RVVT == MVT::f128)

    if (RVVT == MVT::f64 && UnsupportedFPMode) {

    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)

      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        bool IsZExt = Outs[0].Flags.isZExt();
        SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

  for (unsigned Reg : RetRegs)
  SrcVT = TLI.getValueType(DL, Op->getType(), true);
  DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)

  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)

  updateValueMap(I, SrcReg);
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  Register SrcReg = getRegForValue(Src);

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);

  Register ResultReg = createResultReg(&Mips::GPR32RegClass);

  if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))

  updateValueMap(I, ResultReg);
bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                   unsigned DestReg) {

  Register TempReg = createResultReg(&Mips::GPR32RegClass);
  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
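// Illustrative sketch (not from the original file; helper name is ours): on
// pre-MIPS32r2 cores the SLL/SRA pair above sign-extends an i8 or i16 held in
// a 32-bit GPR by shifting the value to the top of the register and
// arithmetic-shifting it back down.
#include <cstdint>

static int32_t sextViaShifts(uint32_t Src, unsigned SrcBits) {
  unsigned ShiftAmt = 32 - SrcBits;               // 24 for i8, 16 for i16
  return (int32_t)(Src << ShiftAmt) >> ShiftAmt;  // SLL, then SRA
}
// Example: sextViaShifts(0x80, 8) == -128, sextViaShifts(0x7F, 8) == 127.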
bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                   unsigned DestReg) {

    emitInst(Mips::SEB, DestReg).addReg(SrcReg);

    emitInst(Mips::SEH, DestReg).addReg(SrcReg);
bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                               unsigned DestReg) {
  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))

  if (Subtarget->hasMips32r2())
    return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                               unsigned DestReg) {

  emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
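// Illustrative sketch (not from the original file; helper name is ours): zero
// extension needs no shifting; emitIntZExt just masks with the ANDi immediate
// chosen for the source width (1, 0xFF, or 0xFFFF).
#include <cstdint>

static uint32_t zextViaMask(uint32_t Src, unsigned SrcBits) {
  uint32_t Imm = (SrcBits == 1) ? 0x1 : (SrcBits == 8) ? 0xFF : 0xFFFF;
  return Src & Imm;  // ANDi
}
// Example: zextViaMask(0xFFFFFF80, 8) == 0x80.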
bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                              unsigned DestReg, bool IsZExt) {
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))

  if (IsZExt)
    return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                  bool isZExt) {
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);

  if (DestVT != MVT::i32)

  switch (ISDOpcode) {

    DivOpc = Mips::SDIV;

    DivOpc = Mips::UDIV;

  Register Src0Reg = getRegForValue(I->getOperand(0));
  Register Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src0Reg || !Src1Reg)

  emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
  emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);

  Register ResultReg = createResultReg(&Mips::GPR32RegClass);

  emitInst(MFOpc, ResultReg);

  updateValueMap(I, ResultReg);
  if (!isTypeSupported(I->getType(), RetVT))

  Register ResultReg = createResultReg(&Mips::GPR32RegClass);

  unsigned Opcode = I->getOpcode();
  const Value *Op0 = I->getOperand(0);
  Register Op0Reg = getRegForValue(Op0);

  if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
    Register TempReg = createResultReg(&Mips::GPR32RegClass);

    MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
    bool IsZExt = Opcode == Instruction::LShr;
    if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))

  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {

    case Instruction::Shl:

    case Instruction::AShr:

    case Instruction::LShr:

    emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
    updateValueMap(I, ResultReg);

  Register Op1Reg = getRegForValue(I->getOperand(1));

  case Instruction::Shl:
    Opcode = Mips::SLLV;

  case Instruction::AShr:
    Opcode = Mips::SRAV;

  case Instruction::LShr:
    Opcode = Mips::SRLV;

  emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
  updateValueMap(I, ResultReg);
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {

  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::SDiv:

  case Instruction::UDiv:

  case Instruction::SRem:

  case Instruction::URem:

  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, true);
  case Instruction::FPToUI:
    return selectFPToInt(I, false);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
                                                           bool IsUnsigned) {
  MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();

  if (VMVT == MVT::i1)

  if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
    Register TempReg = createResultReg(&Mips::GPR32RegClass);
    if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
void MipsFastISel::simplifyAddress(Address &Addr) {
  if (!isInt<16>(Addr.getOffset())) {
    unsigned TempReg =
        materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
    Register DestReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
    Addr.setReg(DestReg);
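// Illustrative sketch (not from the original file; names and the cleared
// offset are assumptions): MIPS load/store encodings carry only a signed
// 16-bit displacement, so simplifyAddress folds an out-of-range offset into
// the base with an ADDu of a materialized constant. Modeled here on address
// values rather than virtual registers.
#include <cstdint>

struct SimpleAddress {
  uint32_t Base;   // value held by the base register
  int64_t Offset;  // displacement to encode in the memory operand
};

static bool fitsInSImm16(int64_t Offset) {
  return Offset >= -32768 && Offset <= 32767;  // isInt<16>
}

static SimpleAddress foldLargeOffset(SimpleAddress A) {
  if (!fitsInSImm16(A.Offset)) {
    A.Base += (uint32_t)A.Offset;  // ADDu base, materialized offset
    A.Offset = 0;                  // assumed: encoded displacement becomes 0
  }
  return A;
}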
unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, unsigned Op1) {

  if (MachineInstOpcode == Mips::MUL) {
    Register ResultReg = createResultReg(RC);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)

  return new MipsFastISel(funcInfo, libInfo);