#include "llvm/IR/IntrinsicsX86.h"

class X86FastISel final : public FastISel {

#include "X86GenFastISel.inc"

  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
                          const DebugLoc &CurMIMD);
  bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
                       unsigned &ResultReg, unsigned Alignment = 1);

  bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,

  bool X86SelectIntToFP(const Instruction *I, bool IsSigned);

    return Subtarget->getInstrInfo();

  bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && Subtarget->hasSSE2()) ||
           (VT == MVT::f32 && Subtarget->hasSSE1()) || VT == MVT::f16;

  bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);

  unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
                             unsigned Op1, unsigned Op2, unsigned Op3);
static std::pair<unsigned, bool>

  bool NeedSwap = false;

  return std::make_pair(CC, NeedSwap);

  return ::addFullAddress(MIB, AM);

  if (!isa<ExtractValueInst>(Cond))

  const auto *EV = cast<ExtractValueInst>(Cond);
  if (!isa<IntrinsicInst>(EV->getAggregateOperand()))

  const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());

      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
  if (!isTypeLegal(RetTy, RetVT))

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;

  if (II->getParent() != I->getParent())

  for (auto Itr = std::prev(Start); Itr != End; --Itr) {

    if (!isa<ExtractValueInst>(Itr))

    const auto *EVI = cast<ExtractValueInst>(Itr);
    if (EVI->getAggregateOperand() != II)

  auto HasPhis = [](const BasicBlock *Succ) { return !Succ->phis().empty(); };
bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
  EVT evt = TLI.getValueType(DL, Ty, true);
  if (evt == MVT::Other || !evt.isSimple())

  if (VT == MVT::f64 && !Subtarget->hasSSE2())

  if (VT == MVT::f32 && !Subtarget->hasSSE1())

  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);

                                  unsigned Alignment) {
  bool HasSSE1 = Subtarget->hasSSE1();
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasSSE41 = Subtarget->hasSSE41();
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX2 = Subtarget->hasAVX2();
  bool HasAVX512 = Subtarget->hasAVX512();
  bool HasVLX = Subtarget->hasVLX();
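  // Select a load opcode for this value type based on the available ISA
  // features, preferring non-temporal (MOVNTDQA) and aligned forms when the
  // alignment allows and falling back to unaligned moves otherwise.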
  default: return false;

    Opc = HasAVX512 ? X86::VMOVSSZrm_alt
          : HasAVX  ? X86::VMOVSSrm_alt
          : HasSSE1 ? X86::MOVSSrm_alt

    Opc = HasAVX512 ? X86::VMOVSDZrm_alt
          : HasAVX  ? X86::VMOVSDrm_alt
          : HasSSE2 ? X86::MOVSDrm_alt

    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
            HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasVLX ? X86::VMOVAPSZ128rm :
            HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
      Opc = HasVLX ? X86::VMOVUPSZ128rm :
            HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;

    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
            HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasVLX ? X86::VMOVAPDZ128rm :
            HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
      Opc = HasVLX ? X86::VMOVUPDZ128rm :
            HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;

    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
            HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasVLX ? X86::VMOVDQA64Z128rm :
            HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
      Opc = HasVLX ? X86::VMOVDQU64Z128rm :
            HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;

    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
    else if (IsNonTemporal && Alignment >= 16)
    else if (Alignment >= 32)
      Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
      Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;

    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
    else if (IsNonTemporal && Alignment >= 16)
    else if (Alignment >= 32)
      Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
      Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;

    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
    else if (IsNonTemporal && Alignment >= 16)
    else if (Alignment >= 32)
      Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
      Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;

    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
      Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;

    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
      Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;

    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
      Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;

  ResultReg = createResultReg(RC);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
  bool HasSSE1 = Subtarget->hasSSE1();
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasSSE4A = Subtarget->hasSSE4A();
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX512 = Subtarget->hasAVX512();
  bool HasVLX = Subtarget->hasVLX();
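  // Select a store opcode the same way: non-temporal stores when requested
  // and legal for this subtarget, otherwise aligned or unaligned moves for
  // the value type.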
  default: return false;

    Register AndResult = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(X86::AND8ri), AndResult)

  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;

    Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;

    Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;

    if (IsNonTemporal && HasSSE4A)

      Opc = HasAVX512 ? X86::VMOVSSZmr :
            HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;

    if (IsNonTemporal && HasSSE4A)

      Opc = HasAVX512 ? X86::VMOVSDZmr :
            HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;

    Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;

      Opc = HasVLX ? X86::VMOVNTPSZ128mr :
            HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
      Opc = HasVLX ? X86::VMOVAPSZ128mr :
            HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
      Opc = HasVLX ? X86::VMOVUPSZ128mr :
            HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;

      Opc = HasVLX ? X86::VMOVNTPDZ128mr :
            HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
      Opc = HasVLX ? X86::VMOVAPDZ128mr :
            HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
      Opc = HasVLX ? X86::VMOVUPDZ128mr :
            HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;

      Opc = HasVLX ? X86::VMOVNTDQZ128mr :
            HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
      Opc = HasVLX ? X86::VMOVDQA64Z128mr :
            HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
      Opc = HasVLX ? X86::VMOVDQU64Z128mr :
            HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;

      Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
      Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
      Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;

      Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
      Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
      Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;

      Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
      Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
      Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;

      Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
      Opc = X86::VMOVUPSZmr;

      Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
      Opc = X86::VMOVUPDZmr;

      Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
      Opc = X86::VMOVDQU64Zmr;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, Desc);

bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,

  if (isa<ConstantPointerNull>(Val))

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {

    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;

      if (isInt<32>(CI->getSExtValue()))
        Opc = X86::MOV64mi32;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
                                          : CI->getZExtValue());

  Register ValReg = getRegForValue(Val);

  return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
                                    unsigned Src, EVT SrcVT,
                                    unsigned &ResultReg) {

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {

    if (TM.isLargeGlobalValue(GV))

    if (GV->isThreadLocal())

    if (GV->isAbsoluteSymbolRef())

    if (!Subtarget->isPICStyleRIPRel() ||

    unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);

      AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);

    if (Subtarget->isPICStyleRIPRel()) {

      if (I != LocalValueMap.end() && I->second) {

      SavePoint SaveInsertPt = enterLocalValueArea();

      if (TLI.getPointerTy(DL) == MVT::i64) {
        RC = &X86::GR64RegClass;
        RC = &X86::GR32RegClass;

      LoadReg = createResultReg(RC);

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), LoadReg);

      leaveLocalValueArea(SaveInsertPt);

      LocalValueMap[V] = LoadReg;

  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {

    AM.Base.Reg = getRegForValue(V);

  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
        FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
      Opcode = I->getOpcode();

  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();

  if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
    if (Ty->getAddressSpace() > 255)

  case Instruction::BitCast:

  case Instruction::IntToPtr:
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))

  case Instruction::PtrToInt:
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))

  case Instruction::Alloca: {
        FuncInfo.StaticAllocaMap.find(A);
    if (SI != FuncInfo.StaticAllocaMap.end()) {

  case Instruction::Add: {
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      if (isInt<32>(Disp)) {

  case Instruction::GetElementPtr: {
    unsigned Scale = AM.Scale;

         i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {

        uint64_t S = GTI.getSequentialElementStride(DL);

          Disp += CI->getSExtValue() * S;

          if (canFoldAddIntoGEP(U, Op)) {
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            Op = cast<AddOperator>(Op)->getOperand(0);

                   (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
                   (S == 1 || S == 2 || S == 4 || S == 8)) {
          IndexReg = getRegForGEPIndex(Op);

          goto unsupported_gep;

    if (!isInt<32>(Disp))

          dyn_cast<GetElementPtrInst>(U->getOperand(0))) {

    if (handleConstantAddresses(I, AM))

  return handleConstantAddresses(V, AM);
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

    Opcode = I->getOpcode();

    InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();

  case Instruction::BitCast:
    return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
        TLI.getValueType(DL, U->getOperand(0)->getType()) ==
            TLI.getPointerTy(DL))
      return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::PtrToInt:
    if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return X86SelectCallAddress(U->getOperand(0), AM);

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {

    if (Subtarget->isPICStyleRIPRel() &&

      if (GVar->isThreadLocal())

    if (Subtarget->isPICStyleRIPRel()) {

      AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);

  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    auto GetCallRegForValue = [this](const Value *V) {

      if (Reg && Subtarget->isTarget64BitILP32()) {
        Register CopyReg = createResultReg(&X86::GR32RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr),

        Register ExtReg = createResultReg(&X86::GR64RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(TargetOpcode::SUBREG_TO_REG), ExtReg)

      AM.Base.Reg = GetCallRegForValue(V);

      AM.IndexReg = GetCallRegForValue(V);
bool X86FastISel::X86SelectStore(const Instruction *I) {

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {

    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())

  if (!isTypeLegal(Val->getType(), VT, true))

  bool Aligned = Alignment >= ABIAlignment;

  return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);

  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))

  if (TLI.supportSplitCSR(FuncInfo.MF))

  if (Ret->getNumOperands() > 0) {

    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());

    const Value *RV = Ret->getOperand(0);

    if (ValLocs.size() != 1)

    if (SrcVT != DstVT) {
      if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())

      if (SrcVT == MVT::i1) {
        if (Outs[0].Flags.isSExt())

        SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);

      if (SrcVT != DstVT) {

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

           "SRetReturnReg should have been set in LowerFormalArguments()!");
    unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                  TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32))

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                  TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32));

  for (unsigned Reg : RetRegs)
  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {

    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())

  if (!isTypeLegal(LI->getType(), VT, true))

  unsigned ResultReg = 0;
  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,

  updateValueMap(I, ResultReg);

  bool HasAVX512 = Subtarget->hasAVX512();
  bool HasAVX = Subtarget->hasAVX();
  bool HasSSE1 = Subtarget->hasSSE1();
  bool HasSSE2 = Subtarget->hasSSE2();
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;

    return HasAVX512 ? X86::VUCOMISSZrr
           : HasAVX  ? X86::VUCOMISSrr
           : HasSSE1 ? X86::UCOMISSrr

    return HasAVX512 ? X86::VUCOMISDZrr
           : HasAVX  ? X86::VUCOMISDrr
           : HasSSE2 ? X86::UCOMISDrr

    return X86::CMP16ri;

    return X86::CMP32ri;

    return isInt<32>(RHSC->getSExtValue()) ? X86::CMP64ri32 : 0;

bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,

  Register Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0)
    return false;

  if (isa<ConstantPointerNull>(Op1))

  if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareImmOpc))
          .addImm(Op1C->getSExtValue());

  if (CompareOpc == 0)
    return false;

  Register Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0)
    return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc))

  const CmpInst *CI = cast<CmpInst>(I);

  if (!isTypeLegal(I->getOperand(0)->getType(), VT))

  unsigned ResultReg = 0;
  switch (Predicate) {

    ResultReg = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0),

    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit);

    ResultReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),

    updateValueMap(I, ResultReg);

  const auto *RHSC = dyn_cast<ConstantFP>(RHS);
  if (RHSC && RHSC->isNullValue())
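  // FCMP_OEQ and FCMP_UNE cannot be checked with a single SETcc; the table
  // below holds the two condition codes to test and the opcode used to
  // combine the resulting flag bytes.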
  static const uint16_t SETFOpcTable[2][3] = {

  switch (Predicate) {

    ResultReg = createResultReg(&X86::GR8RegClass);

    if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))

    Register FlagReg1 = createResultReg(&X86::GR8RegClass);
    Register FlagReg2 = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
            FlagReg1).addImm(SETFOpc[0]);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
            FlagReg2).addImm(SETFOpc[1]);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(SETFOpc[2]),

    updateValueMap(I, ResultReg);

  if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),

  updateValueMap(I, ResultReg);

  EVT DstVT = TLI.getValueType(DL, I->getType());
  if (!TLI.isTypeLegal(DstVT))

  Register ResultReg = getRegForValue(I->getOperand(0));

  MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
  if (SrcVT == MVT::i1) {

    ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);

  if (DstVT == MVT::i64) {

    case MVT::i8:  MovInst = X86::MOVZX32rr8;  break;
    case MVT::i16: MovInst = X86::MOVZX32rr16; break;
    case MVT::i32: MovInst = X86::MOV32rr;     break;

    Register Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovInst), Result32)

    ResultReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::SUBREG_TO_REG),

  } else if (DstVT == MVT::i16) {

    Register Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8),
            Result32).addReg(ResultReg);

    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
  } else if (DstVT != MVT::i8) {

  updateValueMap(I, ResultReg);
  EVT DstVT = TLI.getValueType(DL, I->getType());
  if (!TLI.isTypeLegal(DstVT))

  Register ResultReg = getRegForValue(I->getOperand(0));

  MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
  if (SrcVT == MVT::i1) {

    Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);

    ResultReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r),
            ResultReg).addReg(ZExtReg);

  if (DstVT == MVT::i16) {

    Register Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8),
            Result32).addReg(ResultReg);

    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
  } else if (DstVT != MVT::i8) {

  updateValueMap(I, ResultReg);

bool X86FastISel::X86SelectBranch(const Instruction *I) {

    switch (Predicate) {

    const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
    if (CmpRHSC && CmpRHSC->isNullValue())

    if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {

    bool NeedExtraBranch = false;
    switch (Predicate) {

      NeedExtraBranch = true;

    if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))

    if (NeedExtraBranch) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))

    finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);

      if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
          isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
        unsigned TestOpc = 0;

        case MVT::i8:  TestOpc = X86::TEST8ri;   break;
        case MVT::i16: TestOpc = X86::TEST16ri;  break;
        case MVT::i32: TestOpc = X86::TEST32ri;  break;
        case MVT::i64: TestOpc = X86::TEST64ri32; break;

          Register OpReg = getRegForValue(TI->getOperand(0));
          if (OpReg == 0)
            return false;

          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc))

          if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {

          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))

          finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))

    finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);

  if (OpReg == 0)
    return false;

  if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
    unsigned KOpReg = OpReg;
    OpReg = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), OpReg)

    OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))

  finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
bool X86FastISel::X86SelectShift(const Instruction *I) {
  unsigned CReg = 0, OpReg = 0;

  if (I->getType()->isIntegerTy(8)) {

    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR8rCL; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; break;
    default: return false;

  } else if (I->getType()->isIntegerTy(16)) {

    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {

    case Instruction::LShr: OpReg = X86::SHR16rCL; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; break;

  } else if (I->getType()->isIntegerTy(32)) {

    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {

    case Instruction::LShr: OpReg = X86::SHR32rCL; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; break;

  } else if (I->getType()->isIntegerTy(64)) {

    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {

    case Instruction::LShr: OpReg = X86::SHR64rCL; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; break;

  if (!isTypeLegal(I->getType(), VT))

  Register Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0)
    return false;

  Register Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0)
    return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),

  if (CReg != X86::CL)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::KILL), X86::CL)

  Register ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpReg), ResultReg)

  updateValueMap(I, ResultReg);
bool X86FastISel::X86SelectDivRem(const Instruction *I) {
  const static unsigned NumTypes = 4;
  const static unsigned NumOps = 4;
  const static bool S = true;
  const static bool U = false;
  const static unsigned Copy = TargetOpcode::COPY;
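  // Lowering table for integer division and remainder: per type it records
  // the register class, the implicit low/high registers, and for each of
  // sdiv/srem/udiv/urem the divide opcode, how the dividend is sign- or
  // zero-extended, and which register holds the wanted result.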
  const static struct DivRemEntry {

    struct DivRemResult {

      unsigned OpSignExtend;

      unsigned DivRemResultReg;

    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
    { &X86::GR8RegClass, X86::AX, 0, {
        { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S },
        { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S },
        { X86::DIV8r,  0, X86::MOVZX16rr8, X86::AL, U },
        { X86::DIV8r,  0, X86::MOVZX16rr8, X86::AH, U },

    { &X86::GR16RegClass, X86::AX, X86::DX, {
        { X86::IDIV16r, X86::CWD,     Copy, X86::AX, S },
        { X86::IDIV16r, X86::CWD,     Copy, X86::DX, S },
        { X86::DIV16r,  X86::MOV32r0, Copy, X86::AX, U },
        { X86::DIV16r,  X86::MOV32r0, Copy, X86::DX, U },

    { &X86::GR32RegClass, X86::EAX, X86::EDX, {
        { X86::IDIV32r, X86::CDQ,     Copy, X86::EAX, S },
        { X86::IDIV32r, X86::CDQ,     Copy, X86::EDX, S },
        { X86::DIV32r,  X86::MOV32r0, Copy, X86::EAX, U },
        { X86::DIV32r,  X86::MOV32r0, Copy, X86::EDX, U },

    { &X86::GR64RegClass, X86::RAX, X86::RDX, {
        { X86::IDIV64r, X86::CQO,     Copy, X86::RAX, S },
        { X86::IDIV64r, X86::CQO,     Copy, X86::RDX, S },
        { X86::DIV64r,  X86::MOV32r0, Copy, X86::RAX, U },
        { X86::DIV64r,  X86::MOV32r0, Copy, X86::RDX, U },

  if (!isTypeLegal(I->getType(), VT))

  default: return false;
  case MVT::i8:  TypeIndex = 0; break;
  case MVT::i16: TypeIndex = 1; break;
  case MVT::i32: TypeIndex = 2; break;
  case MVT::i64: TypeIndex = 3;
    if (!Subtarget->is64Bit())

  switch (I->getOpcode()) {

  case Instruction::SDiv: OpIndex = 0; break;
  case Instruction::SRem: OpIndex = 1; break;
  case Instruction::UDiv: OpIndex = 2; break;
  case Instruction::URem: OpIndex = 3; break;

  const DivRemEntry &TypeEntry = OpTable[TypeIndex];

  Register Op0Reg = getRegForValue(I->getOperand(0));

  Register Op1Reg = getRegForValue(I->getOperand(1));

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(OpEntry.OpSignExtend));

      Register Zero32 = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(X86::MOV32r0), Zero32);

      if (VT == MVT::i16) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (VT == MVT::i32) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
      } else if (VT == MVT::i64) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);

  unsigned ResultReg = 0;
  if ((I->getOpcode() == Instruction::SRem ||
       I->getOpcode() == Instruction::URem) &&
      OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
    Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
    Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(Copy), SourceSuperReg).addReg(X86::AX);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri),

    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,

    ResultReg = createResultReg(TypeEntry.RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), ResultReg)
        .addReg(OpEntry.DivRemResultReg);

  updateValueMap(I, ResultReg);
bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {

  if (!Subtarget->canUseCMOV())

  if (RetVT < MVT::i16 || RetVT > MVT::i64)

  bool NeedTest = true;

  const auto *CI = dyn_cast<CmpInst>(Cond);
  if (CI && (CI->getParent() == I->getParent())) {
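    // FCMP_OEQ and FCMP_UNE again need two flag tests; the table holds the
    // SETcc conditions and the opcode that combines them.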
    static const uint16_t SETFOpcTable[2][3] = {

    switch (Predicate) {

      SETFOpc = &SETFOpcTable[0][0];

      SETFOpc = &SETFOpcTable[1][0];

    if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))

      Register FlagReg1 = createResultReg(&X86::GR8RegClass);
      Register FlagReg2 = createResultReg(&X86::GR8RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
              FlagReg1).addImm(SETFOpc[0]);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
              FlagReg2).addImm(SETFOpc[1]);
      auto const &II = TII.get(SETFOpc[2]);
      if (II.getNumDefs()) {
        Register TmpReg = createResultReg(&X86::GR8RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, TmpReg)

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

  } else if (foldX86XALUIntrinsic(CC, I, Cond)) {

    if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
      unsigned KCondReg = CondReg;
      CondReg = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), CondReg)

      CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))

  Register RHSReg = getRegForValue(RHS);
  Register LHSReg = getRegForValue(LHS);
  if (!LHSReg || !RHSReg)

                                          Subtarget->hasNDD());
  Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
  updateValueMap(I, ResultReg);
bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {

  const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
  if (!CI || (CI->getParent() != I->getParent()))

      !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
        (Subtarget->hasSSE2() && RetVT == MVT::f64)))

  const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
  if (CmpRHSC && CmpRHSC->isNullValue())

  if (CC > 7 && !Subtarget->hasAVX())

  Register LHSReg = getRegForValue(LHS);
  Register RHSReg = getRegForValue(RHS);
  Register CmpLHSReg = getRegForValue(CmpLHS);
  Register CmpRHSReg = getRegForValue(CmpRHS);
  if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)

  if (Subtarget->hasAVX512()) {

    unsigned CmpOpcode =
        (RetVT == MVT::f32) ? X86::VCMPSSZrri : X86::VCMPSDZrri;
    Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,

    Register ImplicitDefReg = createResultReg(VR128X);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);

    unsigned MovOpcode =
        (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
    unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
                                        ImplicitDefReg, LHSReg);

    ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);

  } else if (Subtarget->hasAVX()) {

    unsigned CmpOpcode =
        (RetVT == MVT::f32) ? X86::VCMPSSrri : X86::VCMPSDrri;
    unsigned BlendOpcode =
        (RetVT == MVT::f32) ? X86::VBLENDVPSrrr : X86::VBLENDVPDrrr;

    Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg,

    Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,

    ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
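    // Without AVX, emulate the select with a compare followed by AND/ANDN/OR
    // masking (CMPSS/ANDPS/ANDNPS/ORPS, or the PD forms for f64).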
    static const uint16_t OpcTable[2][4] = {
      { X86::CMPSSrri, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
      { X86::CMPSDrri, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }

    default: return false;
    case MVT::f32: Opc = &OpcTable[0][0]; break;
    case MVT::f64: Opc = &OpcTable[1][0]; break;

    Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC);
    Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg);
    Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
    Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
    ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);

  updateValueMap(I, ResultReg);

bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {

  default: return false;
  case MVT::i8:  Opc = X86::CMOV_GR8;  break;
  case MVT::i16: Opc = X86::CMOV_GR16; break;
  case MVT::i32: Opc = X86::CMOV_GR32; break;

    Opc = Subtarget->hasAVX512() ? X86::CMOV_FR16X : X86::CMOV_FR16; break;

    Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X : X86::CMOV_FR32; break;

    Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X : X86::CMOV_FR64; break;

  const auto *CI = dyn_cast<CmpInst>(Cond);
  if (CI && (CI->getParent() == I->getParent())) {

    if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))

    if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
      unsigned KCondReg = CondReg;
      CondReg = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), CondReg)

      CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))

  Register LHSReg = getRegForValue(LHS);
  Register RHSReg = getRegForValue(RHS);
  if (!LHSReg || !RHSReg)

      fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
  updateValueMap(I, ResultReg);

bool X86FastISel::X86SelectSelect(const Instruction *I) {

  if (!isTypeLegal(I->getType(), RetVT))

  if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {

    const Value *Opnd = nullptr;
    switch (Predicate) {

      Register OpReg = getRegForValue(Opnd);

      Register ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg)

      updateValueMap(I, ResultReg);

  if (X86FastEmitCMoveSelect(RetVT, I))

  if (X86FastEmitSSESelect(RetVT, I))

  if (X86FastEmitPseudoSelect(RetVT, I))

bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {

  bool HasAVX512 = Subtarget->hasAVX512();
  if (!Subtarget->hasAVX() || (!IsSigned && !HasAVX512))

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (SrcVT != MVT::i32 && SrcVT != MVT::i64)

  Register OpReg = getRegForValue(I->getOperand(0));
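  // Conversion opcode tables: signed conversions are indexed by
  // [HasAVX512][IsDouble][Is64Bit]; the unsigned forms are AVX-512 only and
  // indexed by [IsDouble][Is64Bit].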
  static const uint16_t SCvtOpc[2][2][2] = {
    { { X86::VCVTSI2SSrr,  X86::VCVTSI642SSrr },
      { X86::VCVTSI2SDrr,  X86::VCVTSI642SDrr } },
    { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr },
      { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } },

  static const uint16_t UCvtOpc[2][2] = {
    { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr },
    { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr },

  bool Is64Bit = SrcVT == MVT::i64;

  if (I->getType()->isDoubleTy()) {

    Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit] : UCvtOpc[1][Is64Bit];
  } else if (I->getType()->isFloatTy()) {

    Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit] : UCvtOpc[0][Is64Bit];

  Register ImplicitDefReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
  Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
  updateValueMap(I, ResultReg);

bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
  return X86SelectIntToFP(I, true);

bool X86FastISel::X86SelectUIToFP(const Instruction *I) {
  return X86SelectIntToFP(I, false);

bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,

  assert((I->getOpcode() == Instruction::FPExt ||
          I->getOpcode() == Instruction::FPTrunc) &&
         "Instruction must be an FPExt or FPTrunc!");
  bool HasAVX = Subtarget->hasAVX();

  Register OpReg = getRegForValue(I->getOperand(0));

  unsigned ImplicitDefReg;

    ImplicitDefReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);

  Register ResultReg = createResultReg(RC);

  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpc),

    MIB.addReg(ImplicitDefReg);

  updateValueMap(I, ResultReg);

bool X86FastISel::X86SelectFPExt(const Instruction *I) {
  if (Subtarget->hasSSE2() && I->getType()->isDoubleTy() &&
      I->getOperand(0)->getType()->isFloatTy()) {
    bool HasAVX512 = Subtarget->hasAVX512();

        HasAVX512 ? X86::VCVTSS2SDZrr
        : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
    return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64));

bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
  if (Subtarget->hasSSE2() && I->getType()->isFloatTy() &&
      I->getOperand(0)->getType()->isDoubleTy()) {
    bool HasAVX512 = Subtarget->hasAVX512();

        HasAVX512 ? X86::VCVTSD2SSZrr
        : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
    return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32));
bool X86FastISel::X86SelectTrunc(const Instruction *I) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (DstVT != MVT::i8 && DstVT != MVT::i1)

  if (!TLI.isTypeLegal(SrcVT))

  Register InputReg = getRegForValue(I->getOperand(0));

  if (SrcVT == MVT::i8) {

    updateValueMap(I, InputReg);

  Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg,

  updateValueMap(I, ResultReg);

bool X86FastISel::IsMemcpySmall(uint64_t Len) {
  return Len <= (Subtarget->is64Bit() ? 32 : 16);

  if (!IsMemcpySmall(Len))

  bool i64Legal = Subtarget->is64Bit();

    if (Len >= 8 && i64Legal)

    bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
    RV &= X86FastEmitStore(VT, Reg, DestAM);
    assert(RV && "Failed to emit load or store??");

  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16: {
    if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())

    bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
    if (IsFloatToHalf) {
      if (!Op->getType()->isFloatTy())

      if (!II->getType()->isFloatTy())

    unsigned ResultReg = 0;

    if (IsFloatToHalf) {

      unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr

      InputReg = fastEmitInst_ri(Opc, RC, InputReg, 4);

      Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
                                   : X86::VMOVPDI2DIrr;
      ResultReg = createResultReg(&X86::GR32RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)

      unsigned RegIdx = X86::sub_16bit;
      ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, RegIdx);

      assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");

      unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr

      InputReg = fastEmitInst_r(Opc, RC, InputReg);

      ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg)

    updateValueMap(II, ResultReg);
  case Intrinsic::frameaddress: {

    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeLegal(RetTy, VT))

    case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
    case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;

    unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
    assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
            (FrameReg == X86::EBP && VT == MVT::i32)) &&
           "Invalid Frame Register!");

    Register SrcReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);

    unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();

      Register DestReg = createResultReg(RC);
                           TII.get(Opc), DestReg), SrcReg);

    updateValueMap(II, SrcReg);

  case Intrinsic::memcpy: {

    if (isa<ConstantInt>(MCI->getLength())) {

      if (IsMemcpySmall(Len)) {

        TryEmitSmallMemcpy(DestAM, SrcAM, Len);

    unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;

    return lowerCallTo(II, "memcpy", II->arg_size() - 1);

  case Intrinsic::memset: {

    unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;

    return lowerCallTo(II, "memset", II->arg_size() - 1);

  case Intrinsic::stackprotector: {

    EVT PtrTy = TLI.getPointerTy(DL);

    const Value *Op1 = II->getArgOperand(0);

    if (!X86FastEmitStore(PtrTy, Op1, AM))
      return false;

  case Intrinsic::dbg_declare: {

           "Expected inlined-at fields to agree");

  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP));

  case Intrinsic::sqrt: {
    if (!Subtarget->hasSSE1())

    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeLegal(RetTy, VT))
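    // Scalar square-root opcodes indexed by [SSE/AVX/AVX-512 level][f64?].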
    static const uint16_t SqrtOpc[3][2] = {
      { X86::SQRTSSr,   X86::SQRTSDr },
      { X86::VSQRTSSr,  X86::VSQRTSDr },
      { X86::VSQRTSSZr, X86::VSQRTSDZr },

    unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
                        Subtarget->hasAVX()    ? 1 :

    default: return false;
    case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
    case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;

    const Value *SrcVal = II->getArgOperand(0);
    Register SrcReg = getRegForValue(SrcVal);

    unsigned ImplicitDefReg = 0;

      ImplicitDefReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);

    Register ResultReg = createResultReg(RC);

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),

      MIB.addReg(ImplicitDefReg);

    updateValueMap(II, ResultReg);

  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {

    auto *Ty = cast<StructType>(Callee->getReturnType());

           "Overflow value expected to be an i1");

    if (!isTypeLegal(RetTy, VT))

    if (VT < MVT::i8 || VT > MVT::i64)

    if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())

    switch (II->getIntrinsicID()) {

    case Intrinsic::sadd_with_overflow:

    case Intrinsic::uadd_with_overflow:

    case Intrinsic::ssub_with_overflow:

    case Intrinsic::usub_with_overflow:

    case Intrinsic::smul_with_overflow:

    case Intrinsic::umul_with_overflow:

    Register LHSReg = getRegForValue(LHS);

    unsigned ResultReg = 0;

    if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
      static const uint16_t Opc[2][4] = {
        { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
        { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }

        ResultReg = createResultReg(TLI.getRegClassFor(VT));

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)

        ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());

      RHSReg = getRegForValue(RHS);

      ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);

        { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
      static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
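      // MULr uses an implicit AL/AX/EAX/RAX operand, so the LHS is first
      // copied into the fixed register before the multiply is emitted.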
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])

      ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
                                 TLI.getRegClassFor(VT), RHSReg);

        { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
      if (VT == MVT::i8) {

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(TargetOpcode::COPY), X86::AL)

        ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);

        ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
                                    TLI.getRegClassFor(VT), LHSReg, RHSReg);

    Register ResultReg2 = createResultReg(&X86::GR8RegClass);
    assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
            ResultReg2).addImm(CondCode);

    updateValueMap(II, ResultReg, 2);

  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {

    switch (II->getIntrinsicID()) {

    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
      if (!Subtarget->hasSSE1())

      IsInputDouble = false;

    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (!Subtarget->hasSSE2())

      IsInputDouble = true;

    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeLegal(RetTy, VT))
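    // Truncating FP-to-int conversion opcodes indexed by
    // [SSE/AVX/AVX-512 level][input is double][result is 64-bit].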
    static const uint16_t CvtOpc[3][2][2] = {
      { { X86::CVTTSS2SIrr,   X86::CVTTSS2SI64rr },
        { X86::CVTTSD2SIrr,   X86::CVTTSD2SI64rr } },
      { { X86::VCVTTSS2SIrr,  X86::VCVTTSS2SI64rr },
        { X86::VCVTTSD2SIrr,  X86::VCVTTSD2SI64rr } },
      { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr },
        { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } },

    unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
                        Subtarget->hasAVX()    ? 1 :

    case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
    case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;

    while (auto *IE = dyn_cast<InsertElementInst>(Op)) {

      if (!isa<ConstantInt>(Index))

      unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();

        Op = IE->getOperand(1);

        Op = IE->getOperand(0);

    Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)

    updateValueMap(II, ResultReg);

  case Intrinsic::x86_sse42_crc32_32_8:
  case Intrinsic::x86_sse42_crc32_32_16:
  case Intrinsic::x86_sse42_crc32_32_32:
  case Intrinsic::x86_sse42_crc32_64_64: {
    if (!Subtarget->hasCRC32())

    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeLegal(RetTy, VT))

    switch (II->getIntrinsicID()) {

#define GET_EGPR_IF_ENABLED(OPC) Subtarget->hasEGPR() ? OPC##_EVEX : OPC
    case Intrinsic::x86_sse42_crc32_32_8:

      RC = &X86::GR32RegClass;

    case Intrinsic::x86_sse42_crc32_32_16:

      RC = &X86::GR32RegClass;

    case Intrinsic::x86_sse42_crc32_32_32:

      RC = &X86::GR32RegClass;

    case Intrinsic::x86_sse42_crc32_64_64:

      RC = &X86::GR64RegClass;

#undef GET_EGPR_IF_ENABLED

    Register LHSReg = getRegForValue(LHS);
    Register RHSReg = getRegForValue(RHS);
    if (!LHSReg || !RHSReg)

    Register ResultReg = fastEmitInst_rr(Opc, RC, LHSReg, RHSReg);

    updateValueMap(II, ResultReg);

bool X86FastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)

  if (Subtarget->isCallingConvWin64(CC))

  if (!Subtarget->is64Bit())

  if (Subtarget->useSoftFloat())

  unsigned GPRCnt = 0;
  unsigned FPRCnt = 0;

  for (auto const &Arg : F->args()) {
    if (Arg.hasAttribute(Attribute::ByVal) ||
        Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftAsync) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))

    Type *ArgTy = Arg.getType();

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple())
      return false;

    default: return false;

      if (!Subtarget->hasSSE1())

  static const MCPhysReg GPR32ArgRegs[] = {
    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D

  static const MCPhysReg GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9

    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
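  // Second pass over the arguments: hand each one the next free GPR or XMM
  // register of the SysV x86-64 convention and copy it into a virtual
  // register.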
  unsigned FPRIdx = 0;
  for (auto const &Arg : F->args()) {
    MVT VT = TLI.getSimpleValueType(DL, Arg.getType());

    case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
    case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
    case MVT::f32: [[fallthrough]];
    case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;

    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);

    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)

    updateValueMap(&Arg, ResultReg);

  if (Subtarget->is64Bit())

bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
  auto &OutVals = CLI.OutVals;
  auto &OutFlags = CLI.OutFlags;
  auto &OutRegs = CLI.OutRegs;
  auto &Ins = CLI.Ins;
  auto &InRegs = CLI.InRegs;

  bool &IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  const auto *CB = CLI.CB;

  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isCallingConvWin64(CC);

  if (CB && CB->doesNoCfCheck())

  if ((CB && isa<CallInst>(CB) && CB->hasFnAttr("no_caller_saved_registers")))

  if ((CB && CB->hasFnAttr("no_callee_saved_registers")))

  if (Subtarget->useIndirectThunkCalls())

  default: return false;

  if (IsVarArg && IsWin64)

  if (CLI.CB && CLI.CB->hasInAllocaArgument())

  for (auto Flag : CLI.OutFlags)
    if (Flag.isSwiftError() || Flag.isPreallocated())

  for (int i = 0, e = OutVals.size(); i != e; ++i) {
    Value *&Val = OutVals[i];

    if (auto *CI = dyn_cast<ConstantInt>(Val)) {
      if (CI->getBitWidth() < 32) {

          Val = ConstantInt::get(CI->getContext(), CI->getValue().sext(32));

          Val = ConstantInt::get(CI->getContext(), CI->getValue().zext(32));

    auto *TI = dyn_cast<TruncInst>(Val);

    if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
        (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
      Value *PrevVal = TI->getOperand(0);
      ResultReg = getRegForValue(PrevVal);

      if (!isTypeLegal(PrevVal->getType(), VT))

      ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1);

      if (!isTypeLegal(Val->getType(), VT) ||

      ResultReg = getRegForValue(Val);

  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());

    CCInfo.AllocateStack(32, Align(8));
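    // The 32 bytes reserved here are the Win64 shadow (home) area for the
    // four register parameters.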
  CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))

    if (ArgVT == MVT::x86mmx)

    unsigned ArgReg = ArgRegs[VA.getValNo()];

             "Unexpected extend");

      if (ArgVT == MVT::i1)

      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;

             "Unexpected extend");

      if (ArgVT == MVT::i1) {

        ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);

      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;

             "Unexpected extend");

      assert(Emitted && "Failed to emit a aext!"); (void)Emitted;

      assert(ArgReg && "Failed to emit a bitcast!");

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

      if (isa<UndefValue>(ArgVal))

      AM.Disp = LocMemOffset;

      if (Flags.isByVal()) {

        if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))

      } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {

        if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))

        if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO))

  if (Subtarget->isPICStyleGOT()) {
    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

  if (Is64Bit && IsVarArg && !IsWin64) {

      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7

    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget->hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
            X86::AL).addImm(NumXMMRegs);

  if (!X86SelectCallAddress(Callee, CalleeAM))

  unsigned CalleeOp = 0;

  if (CalleeAM.GV != nullptr) {

  } else if (CalleeAM.Base.Reg != 0) {

    unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc))

    assert(GV && "Not a direct call");

    unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);

        cast<Function>(GV)->isIntrinsic())

    unsigned CallOpc = NeedLoad
                           ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
                           : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));

      MIB.addSym(Symbol, OpFlags);

  if (Subtarget->isPICStyleGOT())

  if (Is64Bit && IsVarArg && !IsWin64)

  for (auto Reg : OutRegs)

  unsigned NumBytesForCalleeToPop =

      TM.Options.GuaranteedTailCallOpt)

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))

  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
                    CLI.RetTy->getContext());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);

  Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {

    unsigned CopyReg = ResultReg + i;

    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {

    if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {

      CopyReg = createResultReg(&X86::RFP80RegClass);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);

      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;

      Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
                                TII.get(Opc), ResultReg + i), FI);

  CLI.ResultReg = ResultReg;
  CLI.NumResultRegs = RVLocs.size();
bool
X86FastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {

  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::Ret:
    return X86SelectRet(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::SExt:
    return X86SelectSExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return X86SelectDivRem(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  case Instruction::SIToFP:
    return X86SelectSIToFP(I);
  case Instruction::UIToFP:
    return X86SelectUIToFP(I);
  case Instruction::IntToPtr:
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());

      return X86SelectZExt(I);

      return X86SelectTrunc(I);

    if (Reg == 0)
      return false;
    updateValueMap(I, Reg);

  case Instruction::BitCast: {

    if (!Subtarget->hasSSE2())

    if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT) ||
        !isTypeLegal(I->getType(), DstVT))

    Register ResultReg = createResultReg(DstClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

    updateValueMap(I, ResultReg);
unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {

    Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);

      return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit);

      return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit);

      Register ResultReg = createResultReg(&X86::GR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)

  case MVT::i8:  Opc = X86::MOV8ri;  break;
  case MVT::i16: Opc = X86::MOV16ri; break;
  case MVT::i32: Opc = X86::MOV32ri; break;

    if (isUInt<32>(Imm))
      Opc = X86::MOV32ri64;
    else if (isInt<32>(Imm))
      Opc = X86::MOV64ri32;

  return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);

unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {

    return fastMaterializeFloatZero(CFP);
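  // Any other FP constant is materialized with a scalar load from the
  // constant pool; pick the load opcode for the type.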
3784 bool HasSSE1 = Subtarget->hasSSE1();
3785 bool HasSSE2 = Subtarget->hasSSE2();
3786 bool HasAVX = Subtarget->hasAVX();
3787 bool HasAVX512 = Subtarget->hasAVX512();
3791 Opc = HasAVX512 ? X86::VMOVSSZrm_alt
3792 : HasAVX ? X86::VMOVSSrm_alt
3793 : HasSSE1 ? X86::MOVSSrm_alt
3797 Opc = HasAVX512 ? X86::VMOVSDZrm_alt
3798 : HasAVX ? X86::VMOVSDrm_alt
3799 : HasSSE2 ? X86::MOVSDrm_alt
3811 unsigned PICBase = 0;
3812 unsigned char OpFlag = Subtarget->classifyLocalReference(
nullptr);
3814 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3816 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3821 unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
3826 Register AddrReg = createResultReg(&X86::GR64RegClass);
3827 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(X86::MOV64ri),
3831 TII.get(Opc), ResultReg);
3832 addRegReg(MIB, AddrReg,
false, PICBase,
false);
3841 TII.get(Opc), ResultReg),
3842 CPI, PICBase, OpFlag);
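// Constant-pool addressing above: in the common case the scalar FP load is
// emitted with addConstantPoolReference, addressing the pool entry RIP-relative
// or through the PIC base register described by PICBase/OpFlag. When the pool
// cannot be reached that way (the large code model), the pool address is first
// materialized into AddrReg with MOV64ri and the load is built with an
// [AddrReg + PICBase] address via addRegReg.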
3846unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
3851  if (TM.isLargeGlobalValue(GV))
3863    Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3865        TLI.getPointerTy(DL) == MVT::i64) {
3868      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
3873          TLI.getPointerTy(DL) == MVT::i32
3874              ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3877                             TII.get(Opc), ResultReg), AM);
3884unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
3885  EVT CEVT = TLI.getValueType(DL, C->getType(), true);
3892  if (const auto *CI = dyn_cast<ConstantInt>(C))
3893    return X86MaterializeInt(CI, VT);
3894  if (const auto *CFP = dyn_cast<ConstantFP>(C))
3895    return X86MaterializeFP(CFP, VT);
3896  if (const auto *GV = dyn_cast<GlobalValue>(C))
3897    return X86MaterializeGV(GV, VT);
3898  if (isa<UndefValue>(C)) {
3904      if (!Subtarget->hasSSE1())
3905        Opc = X86::LD_Fp032;
3908      if (!Subtarget->hasSSE2())
3909        Opc = X86::LD_Fp064;
3912      Opc = X86::LD_Fp080;
3917      Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3918      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
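// fastMaterializeConstant dispatches on the constant kind (ConstantInt,
// ConstantFP, GlobalValue). Undef floating-point values that would end up in
// x87 register classes are given a defined value by loading +0.0 with an x87
// pseudo (LD_Fp032/LD_Fp064/LD_Fp080) instead of being left unmaterialized.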
3927unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
3935  if (!FuncInfo.StaticAllocaMap.count(C))
3937  assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
3943      TLI.getPointerTy(DL) == MVT::i32
3944          ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3947  Register ResultReg = createResultReg(RC);
3949                         TII.get(Opc), ResultReg), AM);
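// Illustrative sketch (not part of X86FastISel.cpp): the LEA opcode choice used
// by both X86MaterializeGV and fastMaterializeAlloca above, with hypothetical
// names (LeaOpcode, chooseLeaOpcode). On a 64-bit target with 32-bit pointers
// (isTarget64BitILP32, i.e. x32) the address is computed with LEA64_32r, a
// plain 32-bit pointer uses LEA32r, and a 64-bit pointer uses LEA64r (the
// branch elided in the listing above).

enum class LeaOpcode { LEA32r, LEA64_32r, LEA64r };

static LeaOpcode chooseLeaOpcode(bool PointerIs32Bit, bool Is64BitILP32) {
  if (PointerIs32Bit)
    return Is64BitILP32 ? LeaOpcode::LEA64_32r : LeaOpcode::LEA32r;
  return LeaOpcode::LEA64r;
}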
3953unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
3955  if (!isTypeLegal(CF->getType(), VT))
3959  bool HasSSE1 = Subtarget->hasSSE1();
3960  bool HasSSE2 = Subtarget->hasSSE2();
3961  bool HasAVX512 = Subtarget->hasAVX512();
3966    Opc = HasAVX512 ? X86::AVX512_FsFLD0SH : X86::FsFLD0SH;
3969    Opc = HasAVX512 ? X86::AVX512_FsFLD0SS
3970          : HasSSE1 ? X86::FsFLD0SS
3974    Opc = HasAVX512 ? X86::AVX512_FsFLD0SD
3975          : HasSSE2 ? X86::FsFLD0SD
3983  Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3984  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
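// fastMaterializeFloatZero picks an FsFLD0* pseudo for +0.0, preferring the
// AVX-512 variant when available and falling back to the x87 load-zero when the
// required SSE level is missing; the pseudo is later expanded to a
// register-zeroing idiom (e.g. xorps/vxorps) rather than an actual load.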
3989bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
3990                                      const LoadInst *LI) {
4004      *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, LI->getAlign(),
4014  unsigned OperandNo = 0;
4015  for (MachineInstr::mop_iterator I = Result->operands_begin(),
4016       E = Result->operands_end(); I != E; ++I, ++OperandNo) {
4023    if (IndexReg == MO.getReg())
4028  Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
4029  Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
4031  removeDeadCode(I, std::next(I));
4035unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
4037                                        unsigned Op0, unsigned Op1,
4038                                        unsigned Op2, unsigned Op3) {
4041  Register ResultReg = createResultReg(RC);
4047  if (II.getNumDefs() >= 1)
4048    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
4054    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
4059    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
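// fastEmitInst_rrrr follows the same pattern as FastISel's other fastEmitInst_*
// helpers: when the instruction defines a virtual register it is built directly
// into ResultReg; otherwise the instruction is emitted as-is and the result is
// copied out of its fixed definition with a COPY.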
4070 return new X86FastISel(funcInfo, libInfo);
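// X86::createFastISel is the factory the X86 backend exposes so that the
// SelectionDAG framework can instantiate this fast-path instruction selector
// (used primarily at -O0).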