48 class X86FastISel final :
public FastISel {
65 X86ScalarSSEf64 = Subtarget->
hasSSE2();
66 X86ScalarSSEf32 = Subtarget->hasSSE1();
69 bool fastSelectInstruction(
const Instruction *
I)
override;
78 bool fastLowerArguments()
override;
79 bool fastLowerCall(CallLoweringInfo &CLI)
override;
80 bool fastLowerIntrinsicCall(
const IntrinsicInst *II)
override;
82 #include "X86GenFastISel.inc"
85 bool X86FastEmitCompare(
const Value *LHS,
const Value *RHS,
EVT VT,
89 unsigned &ResultReg,
unsigned Alignment = 1);
93 bool X86FastEmitStore(
EVT VT,
unsigned ValReg,
bool ValIsKill,
129 bool X86SelectFPExtOrFPTrunc(
const Instruction *
I,
unsigned Opc,
137 return Subtarget->getInstrInfo();
148 unsigned fastMaterializeConstant(
const Constant *
C)
override;
150 unsigned fastMaterializeAlloca(
const AllocaInst *
C)
override;
152 unsigned fastMaterializeFloatZero(
const ConstantFP *CF)
override;
156 bool isScalarFPTypeInSSEReg(
EVT VT)
const {
157 return (VT ==
MVT::f64 && X86ScalarSSEf64) ||
158 (VT ==
MVT::f32 && X86ScalarSSEf32);
161 bool isTypeLegal(
Type *Ty,
MVT &VT,
bool AllowI1 =
false);
163 bool IsMemcpySmall(uint64_t Len);
174 unsigned fastEmitInst_rrrr(
unsigned MachineInstOpcode,
176 bool Op0IsKill,
unsigned Op1,
bool Op1IsKill,
177 unsigned Op2,
bool Op2IsKill,
unsigned Op3,
183 static std::pair<X86::CondCode, bool>
186 bool NeedSwap =
false;
218 return std::make_pair(CC, NeedSwap);
221 static std::pair<unsigned, bool>
224 bool NeedSwap =
false;
253 return std::make_pair(CC, NeedSwap);
274 if (!isa<ExtractValueInst>(Cond))
277 const auto *EV = cast<ExtractValueInst>(Cond);
278 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
281 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
283 const Function *Callee = II->getCalledFunction();
285 cast<StructType>(Callee->
getReturnType())->getTypeAtIndex(0U);
286 if (!isTypeLegal(RetTy, RetVT))
293 switch (II->getIntrinsicID()) {
294 default:
return false;
295 case Intrinsic::sadd_with_overflow:
296 case Intrinsic::ssub_with_overflow:
297 case Intrinsic::smul_with_overflow:
298 case Intrinsic::umul_with_overflow: TmpCC =
X86::COND_O;
break;
299 case Intrinsic::uadd_with_overflow:
300 case Intrinsic::usub_with_overflow: TmpCC =
X86::COND_B;
break;
310 for (
auto Itr = std::prev(Start); Itr !=
End; --Itr) {
313 if (!isa<ExtractValueInst>(Itr))
317 const auto *EVI = cast<ExtractValueInst>(Itr);
318 if (EVI->getAggregateOperand() != II)
326 bool X86FastISel::isTypeLegal(
Type *Ty,
MVT &VT,
bool AllowI1) {
327 EVT evt = TLI.getValueType(DL, Ty,
true);
335 if (VT ==
MVT::f64 && !X86ScalarSSEf64)
337 if (VT ==
MVT::f32 && !X86ScalarSSEf32)
346 return (AllowI1 && VT ==
MVT::i1) || TLI.isTypeLegal(VT);
349 #include "X86GenCallingConv.inc"
356 unsigned Alignment) {
357 bool HasSSE41 = Subtarget->hasSSE41();
358 bool HasAVX = Subtarget->hasAVX();
359 bool HasAVX2 = Subtarget->hasAVX2();
360 bool HasAVX512 = Subtarget->hasAVX512();
361 bool HasVLX = Subtarget->hasVLX();
368 default:
return false;
372 RC = &X86::GR8RegClass;
376 RC = &X86::GR16RegClass;
380 RC = &X86::GR32RegClass;
385 RC = &X86::GR64RegClass;
388 if (X86ScalarSSEf32) {
389 Opc = HasAVX512 ? X86::VMOVSSZrm : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
390 RC = &X86::FR32RegClass;
393 RC = &X86::RFP32RegClass;
397 if (X86ScalarSSEf64) {
398 Opc = HasAVX512 ? X86::VMOVSDZrm : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm;
399 RC = &X86::FR64RegClass;
402 RC = &X86::RFP64RegClass;
409 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
410 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
411 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
412 else if (Alignment >= 16)
413 Opc = HasVLX ? X86::VMOVAPSZ128rm :
414 HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
416 Opc = HasVLX ? X86::VMOVUPSZ128rm :
417 HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
418 RC = &X86::VR128RegClass;
421 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
422 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
423 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
424 else if (Alignment >= 16)
425 Opc = HasVLX ? X86::VMOVAPDZ128rm :
426 HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
428 Opc = HasVLX ? X86::VMOVUPDZ128rm :
429 HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
430 RC = &X86::VR128RegClass;
436 if (IsNonTemporal && Alignment >= 16)
437 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
438 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
439 else if (Alignment >= 16)
440 Opc = HasVLX ? X86::VMOVDQA64Z128rm :
441 HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
443 Opc = HasVLX ? X86::VMOVDQU64Z128rm :
444 HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
445 RC = &X86::VR128RegClass;
449 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
450 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
451 else if (Alignment >= 32)
452 Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
454 Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
455 RC = &X86::VR256RegClass;
459 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
460 Opc = X86::VMOVNTDQAYrm;
461 else if (Alignment >= 32)
462 Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
464 Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
465 RC = &X86::VR256RegClass;
472 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
473 Opc = X86::VMOVNTDQAYrm;
474 else if (Alignment >= 32)
475 Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
477 Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
478 RC = &X86::VR256RegClass;
482 if (IsNonTemporal && Alignment >= 64)
483 Opc = X86::VMOVNTDQAZrm;
485 Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
486 RC = &X86::VR512RegClass;
490 if (IsNonTemporal && Alignment >= 64)
491 Opc = X86::VMOVNTDQAZrm;
493 Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
494 RC = &X86::VR512RegClass;
503 if (IsNonTemporal && Alignment >= 64)
504 Opc = X86::VMOVNTDQAZrm;
506 Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
507 RC = &X86::VR512RegClass;
511 ResultReg = createResultReg(RC);
513 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
524 bool X86FastISel::X86FastEmitStore(
EVT VT,
unsigned ValReg,
bool ValIsKill,
527 bool HasSSE2 = Subtarget->hasSSE2();
528 bool HasSSE4A = Subtarget->hasSSE4A();
529 bool HasAVX = Subtarget->hasAVX();
530 bool HasAVX512 = Subtarget->hasAVX512();
531 bool HasVLX = Subtarget->hasVLX();
538 default:
return false;
541 unsigned AndResult = createResultReg(&X86::GR8RegClass);
542 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
543 TII.get(X86::AND8ri), AndResult)
548 case MVT::i8: Opc = X86::MOV8mr;
break;
549 case MVT::i16: Opc = X86::MOV16mr;
break;
551 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
555 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
558 if (X86ScalarSSEf32) {
559 if (IsNonTemporal && HasSSE4A)
562 Opc = HasAVX512 ? X86::VMOVSSZmr :
563 HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
568 if (X86ScalarSSEf32) {
569 if (IsNonTemporal && HasSSE4A)
572 Opc = HasAVX512 ? X86::VMOVSDZmr :
573 HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
580 Opc = HasVLX ? X86::VMOVNTPSZ128mr :
581 HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
583 Opc = HasVLX ? X86::VMOVAPSZ128mr :
584 HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
586 Opc = HasVLX ? X86::VMOVUPSZ128mr :
587 HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
592 Opc = HasVLX ? X86::VMOVNTPDZ128mr :
593 HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
595 Opc = HasVLX ? X86::VMOVAPDZ128mr :
596 HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
598 Opc = HasVLX ? X86::VMOVUPDZ128mr :
599 HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
607 Opc = HasVLX ? X86::VMOVNTDQZ128mr :
608 HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
610 Opc = HasVLX ? X86::VMOVDQA64Z128mr :
611 HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
613 Opc = HasVLX ? X86::VMOVDQU64Z128mr :
614 HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
620 Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
622 Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
624 Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
630 Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
632 Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
634 Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
643 Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
645 Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
647 Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
652 Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
654 Opc = X86::VMOVUPSZmr;
659 Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
661 Opc = X86::VMOVUPDZmr;
671 Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
673 Opc = X86::VMOVDQU64Zmr;
686 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
694 bool X86FastISel::X86FastEmitStore(
EVT VT,
const Value *Val,
698 if (isa<ConstantPointerNull>(Val))
702 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
710 case MVT::i8: Opc = X86::MOV8mi;
break;
711 case MVT::i16: Opc = X86::MOV16mi;
break;
712 case MVT::i32: Opc = X86::MOV32mi;
break;
716 Opc = X86::MOV64mi32;
722 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc));
724 : CI->getZExtValue());
731 unsigned ValReg = getRegForValue(Val);
735 bool ValKill = hasTrivialKill(Val);
736 return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
743 unsigned Src,
EVT SrcVT,
744 unsigned &ResultReg) {
756 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
762 if (GV->isThreadLocal())
768 if (!Subtarget->isPICStyleRIPRel() ||
774 unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
779 AM.
Base.
Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
785 if (Subtarget->isPICStyleRIPRel()) {
799 if (I != LocalValueMap.
end() && I->second != 0) {
811 SavePoint SaveInsertPt = enterLocalValueArea();
813 if (TLI.getPointerTy(DL) ==
MVT::i64) {
815 RC = &X86::GR64RegClass;
817 if (Subtarget->isPICStyleRIPRel())
821 RC = &X86::GR32RegClass;
824 LoadReg = createResultReg(RC);
826 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), LoadReg);
830 leaveLocalValueArea(SaveInsertPt);
833 LocalValueMap[V] = LoadReg;
845 if (!AM.
GV || !Subtarget->isPICStyleRIPRel()) {
847 AM.
Base.
Reg = getRegForValue(V);
865 const User *U =
nullptr;
866 unsigned Opcode = Instruction::UserOp1;
867 if (
const Instruction *I = dyn_cast<Instruction>(V)) {
871 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
872 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
873 Opcode = I->getOpcode();
876 }
else if (
const ConstantExpr *
C = dyn_cast<ConstantExpr>(V)) {
877 Opcode =
C->getOpcode();
882 if (Ty->getAddressSpace() > 255)
889 case Instruction::BitCast:
891 return X86SelectAddress(U->
getOperand(0), AM);
893 case Instruction::IntToPtr:
896 TLI.getPointerTy(DL))
897 return X86SelectAddress(U->
getOperand(0), AM);
900 case Instruction::PtrToInt:
902 if (TLI.getValueType(DL, U->
getType()) == TLI.getPointerTy(DL))
903 return X86SelectAddress(U->
getOperand(0), AM);
906 case Instruction::Alloca: {
910 FuncInfo.StaticAllocaMap.find(A);
911 if (SI != FuncInfo.StaticAllocaMap.end()) {
922 uint64_t Disp = (int32_t)AM.
Disp + (uint64_t)CI->getSExtValue();
926 return X86SelectAddress(U->
getOperand(0), AM);
932 case Instruction::GetElementPtr: {
936 uint64_t Disp = (int32_t)AM.
Disp;
938 unsigned Scale = AM.
Scale;
943 i != e; ++
i, ++GTI) {
945 if (
StructType *STy = GTI.getStructTypeOrNull()) {
953 uint64_t S =
DL.getTypeAllocSize(GTI.getIndexedType());
955 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
957 Disp += CI->getSExtValue() * S;
960 if (canFoldAddIntoGEP(U, Op)) {
963 cast<ConstantInt>(cast<AddOperator>(
Op)->getOperand(1));
966 Op = cast<AddOperator>(
Op)->getOperand(0);
970 (!AM.
GV || !Subtarget->isPICStyleRIPRel()) &&
971 (S == 1 || S == 2 || S == 4 || S == 8)) {
974 IndexReg = getRegForGEPIndex(Op).first;
980 goto unsupported_gep;
994 dyn_cast<GetElementPtrInst>(U->
getOperand(0))) {
999 }
else if (X86SelectAddress(U->
getOperand(0), AM)) {
1008 if (handleConstantAddresses(I, AM))
1018 return handleConstantAddresses(V, AM);
1024 const User *U =
nullptr;
1025 unsigned Opcode = Instruction::UserOp1;
1054 InMBB = I->
getParent() == FuncInfo.MBB->getBasicBlock();
1055 }
else if (
const ConstantExpr *
C = dyn_cast<ConstantExpr>(V)) {
1056 Opcode =
C->getOpcode();
1062 case Instruction::BitCast:
1065 return X86SelectCallAddress(U->
getOperand(0), AM);
1068 case Instruction::IntToPtr:
1072 TLI.getPointerTy(DL))
1073 return X86SelectCallAddress(U->
getOperand(0), AM);
1076 case Instruction::PtrToInt:
1078 if (InMBB && TLI.getValueType(DL, U->
getType()) == TLI.getPointerTy(DL))
1079 return X86SelectCallAddress(U->
getOperand(0), AM);
1084 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1090 if (Subtarget->isPICStyleRIPRel() &&
1095 if (GV->hasDLLImportStorageClass())
1100 if (GVar->isThreadLocal())
1108 if (Subtarget->isPICStyleRIPRel()) {
1114 AM.
GVOpFlags = Subtarget->classifyLocalReference(
nullptr);
1121 if (!AM.
GV || !Subtarget->isPICStyleRIPRel()) {
1123 AM.
Base.
Reg = getRegForValue(V);
1138 bool X86FastISel::X86SelectStore(
const Instruction *I) {
1146 if (TLI.supportSwiftError()) {
1149 if (
const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1150 if (Arg->hasSwiftErrorAttr())
1154 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1155 if (Alloca->isSwiftError())
1164 if (!isTypeLegal(Val->
getType(), VT,
true))
1168 unsigned ABIAlignment =
DL.getABITypeAlignment(Val->
getType());
1170 Alignment = ABIAlignment;
1171 bool Aligned = Alignment >= ABIAlignment;
1174 if (!X86SelectAddress(Ptr, AM))
1177 return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
1181 bool X86FastISel::X86SelectRet(
const Instruction *I) {
1187 if (!FuncInfo.CanLowerReturn)
1190 if (TLI.supportSwiftError() &&
1191 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1194 if (TLI.supportSplitCSR(FuncInfo.MF))
1233 unsigned Reg = getRegForValue(RV);
1238 if (ValLocs.size() != 1)
1255 unsigned SrcReg = Reg + VA.
getValNo();
1256 EVT SrcVT = TLI.getValueType(DL, RV->
getType());
1259 if (SrcVT != DstVT) {
1263 if (!Outs[0].
Flags.isZExt() && !Outs[0].Flags.isSExt())
1269 if (Outs[0].
Flags.isSExt())
1271 SrcReg = fastEmitZExtFromI1(
MVT::i8, SrcReg,
false);
1286 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1287 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
1303 "SRetReturnReg should have been set in LowerFormalArguments()!");
1304 unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
1305 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1306 TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
1313 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1314 TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
1317 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1318 TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
1320 for (
unsigned i = 0, e = RetRegs.
size();
i != e; ++
i)
1327 bool X86FastISel::X86SelectLoad(
const Instruction *I) {
1335 if (TLI.supportSwiftError()) {
1338 if (
const Argument *Arg = dyn_cast<Argument>(SV)) {
1339 if (Arg->hasSwiftErrorAttr())
1343 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1344 if (Alloca->isSwiftError())
1350 if (!isTypeLegal(LI->
getType(), VT,
true))
1356 if (!X86SelectAddress(Ptr, AM))
1360 unsigned ABIAlignment =
DL.getABITypeAlignment(LI->
getType());
1362 Alignment = ABIAlignment;
1364 unsigned ResultReg = 0;
1365 if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
1369 updateValueMap(I, ResultReg);
1374 bool HasAVX = Subtarget->
hasAVX();
1375 bool X86ScalarSSEf32 = Subtarget->
hasSSE1();
1376 bool X86ScalarSSEf64 = Subtarget->
hasSSE2();
1380 case MVT::i8:
return X86::CMP8rr;
1381 case MVT::i16:
return X86::CMP16rr;
1382 case MVT::i32:
return X86::CMP32rr;
1383 case MVT::i64:
return X86::CMP64rr;
1385 return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
1387 return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
1403 return X86::CMP16ri8;
1404 return X86::CMP16ri;
1407 return X86::CMP32ri8;
1408 return X86::CMP32ri;
1411 return X86::CMP64ri8;
1415 return X86::CMP64ri32;
1420 bool X86FastISel::X86FastEmitCompare(
const Value *Op0,
const Value *Op1,
EVT VT,
1422 unsigned Op0Reg = getRegForValue(Op0);
1423 if (Op0Reg == 0)
return false;
1426 if (isa<ConstantPointerNull>(Op1))
1432 if (
const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1434 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc,
TII.get(CompareImmOpc))
1436 .
addImm(Op1C->getSExtValue());
1442 if (CompareOpc == 0)
return false;
1444 unsigned Op1Reg = getRegForValue(Op1);
1445 if (Op1Reg == 0)
return false;
1446 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc,
TII.get(CompareOpc))
1453 bool X86FastISel::X86SelectCmp(
const Instruction *I) {
1454 const CmpInst *CI = cast<CmpInst>(
I);
1465 unsigned ResultReg = 0;
1466 switch (Predicate) {
1469 ResultReg = createResultReg(&X86::GR32RegClass);
1470 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV32r0),
1472 ResultReg = fastEmitInst_extractsubreg(
MVT::i8, ResultReg,
true,
1479 ResultReg = createResultReg(&X86::GR8RegClass);
1480 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV8ri),
1481 ResultReg).addImm(1);
1487 updateValueMap(I, ResultReg);
1499 if (RHSC && RHSC->isNullValue())
1504 static const uint16_t SETFOpcTable[2][3] = {
1505 { X86::SETEr, X86::SETNPr, X86::AND8rr },
1506 { X86::SETNEr, X86::SETPr, X86::OR8rr }
1508 const uint16_t *SETFOpc =
nullptr;
1509 switch (Predicate) {
1515 ResultReg = createResultReg(&X86::GR8RegClass);
1517 if (!X86FastEmitCompare(LHS, RHS, VT, I->
getDebugLoc()))
1520 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
1521 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1522 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(SETFOpc[0]),
1524 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(SETFOpc[1]),
1526 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(SETFOpc[2]),
1527 ResultReg).addReg(FlagReg1).
addReg(FlagReg2);
1528 updateValueMap(I, ResultReg);
1542 if (!X86FastEmitCompare(LHS, RHS, VT, I->
getDebugLoc()))
1545 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
1546 updateValueMap(I, ResultReg);
1550 bool X86FastISel::X86SelectZExt(
const Instruction *I) {
1551 EVT DstVT = TLI.getValueType(DL, I->
getType());
1552 if (!TLI.isTypeLegal(DstVT))
1555 unsigned ResultReg = getRegForValue(I->
getOperand(0));
1563 ResultReg = fastEmitZExtFromI1(
MVT::i8, ResultReg,
false);
1575 case MVT::i8: MovInst = X86::MOVZX32rr8;
break;
1576 case MVT::i16: MovInst = X86::MOVZX32rr16;
break;
1577 case MVT::i32: MovInst = X86::MOV32rr;
break;
1581 unsigned Result32 = createResultReg(&X86::GR32RegClass);
1582 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(MovInst), Result32)
1585 ResultReg = createResultReg(&X86::GR64RegClass);
1586 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG),
1589 }
else if (DstVT !=
MVT::i8) {
1596 updateValueMap(I, ResultReg);
1600 bool X86FastISel::X86SelectBranch(
const Instruction *I) {
1617 switch (Predicate) {
1632 if (CmpRHSC && CmpRHSC->isNullValue())
1637 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1647 bool NeedExtraBranch =
false;
1648 switch (Predicate) {
1654 NeedExtraBranch =
true;
1669 if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->
getDebugLoc()))
1672 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(BranchOpc))
1677 if (NeedExtraBranch) {
1678 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::JP_1))
1682 finishCondBranch(BI->
getParent(), TrueMBB, FalseMBB);
1689 if (TI->hasOneUse() && TI->getParent() == I->
getParent() &&
1690 isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
1691 unsigned TestOpc = 0;
1694 case MVT::i8: TestOpc = X86::TEST8ri;
break;
1695 case MVT::i16: TestOpc = X86::TEST16ri;
break;
1696 case MVT::i32: TestOpc = X86::TEST32ri;
break;
1697 case MVT::i64: TestOpc = X86::TEST64ri32;
break;
1700 unsigned OpReg = getRegForValue(TI->getOperand(0));
1701 if (OpReg == 0)
return false;
1703 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TestOpc))
1704 .addReg(OpReg).
addImm(1);
1706 unsigned JmpOpc = X86::JNE_1;
1707 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1712 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(JmpOpc))
1715 finishCondBranch(BI->
getParent(), TrueMBB, FalseMBB);
1719 }
else if (foldX86XALUIntrinsic(CC, BI, BI->
getCondition())) {
1728 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(BranchOpc))
1730 finishCondBranch(BI->
getParent(), TrueMBB, FalseMBB);
1738 if (OpReg == 0)
return false;
1741 if (
MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
1742 unsigned KOpReg = OpReg;
1743 OpReg = createResultReg(&X86::GR8RegClass);
1744 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1745 TII.get(TargetOpcode::COPY), OpReg)
1748 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::TEST8ri))
1751 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::JNE_1))
1753 finishCondBranch(BI->
getParent(), TrueMBB, FalseMBB);
1757 bool X86FastISel::X86SelectShift(
const Instruction *I) {
1758 unsigned CReg = 0, OpReg = 0;
1762 RC = &X86::GR8RegClass;
1764 case Instruction::LShr: OpReg = X86::SHR8rCL;
break;
1765 case Instruction::AShr: OpReg = X86::SAR8rCL;
break;
1766 case Instruction::Shl: OpReg = X86::SHL8rCL;
break;
1767 default:
return false;
1771 RC = &X86::GR16RegClass;
1773 case Instruction::LShr: OpReg = X86::SHR16rCL;
break;
1774 case Instruction::AShr: OpReg = X86::SAR16rCL;
break;
1775 case Instruction::Shl: OpReg = X86::SHL16rCL;
break;
1776 default:
return false;
1780 RC = &X86::GR32RegClass;
1782 case Instruction::LShr: OpReg = X86::SHR32rCL;
break;
1783 case Instruction::AShr: OpReg = X86::SAR32rCL;
break;
1784 case Instruction::Shl: OpReg = X86::SHL32rCL;
break;
1785 default:
return false;
1789 RC = &X86::GR64RegClass;
1791 case Instruction::LShr: OpReg = X86::SHR64rCL;
break;
1792 case Instruction::AShr: OpReg = X86::SAR64rCL;
break;
1793 case Instruction::Shl: OpReg = X86::SHL64rCL;
break;
1794 default:
return false;
1801 if (!isTypeLegal(I->
getType(), VT))
1804 unsigned Op0Reg = getRegForValue(I->
getOperand(0));
1805 if (Op0Reg == 0)
return false;
1807 unsigned Op1Reg = getRegForValue(I->
getOperand(1));
1808 if (Op1Reg == 0)
return false;
1809 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY),
1810 CReg).addReg(Op1Reg);
1814 if (CReg != X86::CL)
1815 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1819 unsigned ResultReg = createResultReg(RC);
1820 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpReg), ResultReg)
1822 updateValueMap(I, ResultReg);
1826 bool X86FastISel::X86SelectDivRem(
const Instruction *I) {
1827 const static unsigned NumTypes = 4;
1828 const static unsigned NumOps = 4;
1829 const static bool S =
true;
1830 const static bool U =
false;
1831 const static unsigned Copy = TargetOpcode::COPY;
1841 const static struct DivRemEntry {
1847 struct DivRemResult {
1849 unsigned OpSignExtend;
1853 unsigned DivRemResultReg;
1855 } ResultTable[NumOps];
1856 } OpTable[NumTypes] = {
1857 { &X86::GR8RegClass, X86::AX, 0, {
1858 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S },
1859 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S },
1860 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U },
1861 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U },
1864 { &X86::GR16RegClass, X86::AX, X86::DX, {
1865 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S },
1866 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S },
1867 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U },
1868 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U },
1871 { &X86::GR32RegClass, X86::EAX, X86::EDX, {
1872 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S },
1873 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S },
1874 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U },
1875 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U },
1878 { &X86::GR64RegClass, X86::RAX, X86::RDX, {
1879 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S },
1880 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S },
1881 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U },
1882 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U },
1888 if (!isTypeLegal(I->
getType(), VT))
1891 unsigned TypeIndex, OpIndex;
1893 default:
return false;
1894 case MVT::i8: TypeIndex = 0;
break;
1895 case MVT::i16: TypeIndex = 1;
break;
1896 case MVT::i32: TypeIndex = 2;
break;
1898 if (!Subtarget->is64Bit())
1905 case Instruction::SDiv: OpIndex = 0;
break;
1906 case Instruction::SRem: OpIndex = 1;
break;
1907 case Instruction::UDiv: OpIndex = 2;
break;
1908 case Instruction::URem: OpIndex = 3;
break;
1911 const DivRemEntry &TypeEntry = OpTable[TypeIndex];
1912 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1913 unsigned Op0Reg = getRegForValue(I->
getOperand(0));
1916 unsigned Op1Reg = getRegForValue(I->
getOperand(1));
1921 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1922 TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
1924 if (OpEntry.OpSignExtend) {
1925 if (OpEntry.IsOpSigned)
1926 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1927 TII.get(OpEntry.OpSignExtend));
1929 unsigned Zero32 = createResultReg(&X86::GR32RegClass);
1930 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1931 TII.get(X86::MOV32r0), Zero32);
1937 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1938 TII.get(Copy), TypeEntry.HighInReg)
1939 .addReg(Zero32, 0, X86::sub_16bit);
1941 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1942 TII.get(Copy), TypeEntry.HighInReg)
1945 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1946 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1952 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1953 TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
1962 unsigned ResultReg = 0;
1963 if ((I->
getOpcode() == Instruction::SRem ||
1965 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
1966 unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
1967 unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
1968 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1969 TII.get(Copy), SourceSuperReg).addReg(X86::AX);
1972 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::SHR16ri),
1973 ResultSuperReg).addReg(SourceSuperReg).
addImm(8);
1976 ResultReg = fastEmitInst_extractsubreg(
MVT::i8, ResultSuperReg,
1977 true, X86::sub_8bit);
1981 ResultReg = createResultReg(TypeEntry.RC);
1982 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), ResultReg)
1983 .addReg(OpEntry.DivRemResultReg);
1985 updateValueMap(I, ResultReg);
1992 bool X86FastISel::X86FastEmitCMoveSelect(
MVT RetVT,
const Instruction *I) {
1994 if (!Subtarget->hasCMov())
1998 if (RetVT < MVT::i16 || RetVT >
MVT::i64)
2003 bool NeedTest =
true;
2014 static const uint16_t SETFOpcTable[2][3] = {
2015 { X86::SETNPr, X86::SETEr , X86::TEST8rr },
2016 { X86::SETPr, X86::SETNEr, X86::OR8rr }
2018 const uint16_t *SETFOpc =
nullptr;
2019 switch (Predicate) {
2022 SETFOpc = &SETFOpcTable[0][0];
2026 SETFOpc = &SETFOpcTable[1][0];
2040 EVT CmpVT = TLI.getValueType(DL, CmpLHS->
getType());
2042 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->
getDebugLoc()))
2046 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
2047 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
2048 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(SETFOpc[0]),
2050 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(SETFOpc[1]),
2052 auto const &II =
TII.get(SETFOpc[2]);
2053 if (II.getNumDefs()) {
2054 unsigned TmpReg = createResultReg(&X86::GR8RegClass);
2055 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
2058 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2063 }
else if (foldX86XALUIntrinsic(CC, I, Cond)) {
2066 unsigned TmpReg = getRegForValue(Cond);
2079 unsigned CondReg = getRegForValue(Cond);
2082 bool CondIsKill = hasTrivialKill(Cond);
2085 if (
MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2086 unsigned KCondReg = CondReg;
2087 CondReg = createResultReg(&X86::GR8RegClass);
2088 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2089 TII.get(TargetOpcode::COPY), CondReg)
2092 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::TEST8ri))
2100 unsigned RHSReg = getRegForValue(RHS);
2101 bool RHSIsKill = hasTrivialKill(RHS);
2103 unsigned LHSReg = getRegForValue(LHS);
2104 bool LHSIsKill = hasTrivialKill(LHS);
2106 if (!LHSReg || !RHSReg)
2110 unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
2112 updateValueMap(I, ResultReg);
2121 bool X86FastISel::X86FastEmitSSESelect(
MVT RetVT,
const Instruction *I) {
2130 !((Subtarget->hasSSE1() && RetVT ==
MVT::f32) ||
2131 (Subtarget->hasSSE2() && RetVT ==
MVT::f64)))
2143 if (CmpRHSC && CmpRHSC->isNullValue())
2157 static const uint16_t OpcTable[2][4] = {
2158 { X86::CMPSSrr, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
2159 { X86::CMPSDrr, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }
2162 const uint16_t *Opc =
nullptr;
2164 default:
return false;
2165 case MVT::f32: Opc = &OpcTable[0][0];
break;
2166 case MVT::f64: Opc = &OpcTable[1][0];
break;
2172 unsigned LHSReg = getRegForValue(LHS);
2173 bool LHSIsKill = hasTrivialKill(LHS);
2175 unsigned RHSReg = getRegForValue(RHS);
2176 bool RHSIsKill = hasTrivialKill(RHS);
2178 unsigned CmpLHSReg = getRegForValue(CmpLHS);
2179 bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
2181 unsigned CmpRHSReg = getRegForValue(CmpRHS);
2182 bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
2184 if (!LHSReg || !RHSReg || !CmpLHS || !CmpRHS)
2190 if (Subtarget->hasAVX512()) {
2195 unsigned CmpOpcode =
2196 (RetVT ==
MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
2197 unsigned CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill,
2198 CmpRHSReg, CmpRHSIsKill, CC);
2202 unsigned ImplicitDefReg = createResultReg(VR128X);
2203 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2204 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2208 unsigned MovOpcode =
2209 (RetVT ==
MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
2210 unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill,
2211 CmpReg,
true, ImplicitDefReg,
true,
2214 ResultReg = createResultReg(RC);
2215 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2216 TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
2218 }
else if (Subtarget->hasAVX()) {
2226 unsigned CmpOpcode =
2227 (RetVT ==
MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
2228 unsigned BlendOpcode =
2229 (RetVT ==
MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
2231 unsigned CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill,
2232 CmpRHSReg, CmpRHSIsKill, CC);
2233 unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
2234 LHSReg, LHSIsKill, CmpReg,
true);
2235 ResultReg = createResultReg(RC);
2236 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2237 TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
2240 unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
2241 CmpRHSReg, CmpRHSIsKill, CC);
2242 unsigned AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg,
false,
2244 unsigned AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg,
true,
2246 unsigned OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg,
true,
2248 ResultReg = createResultReg(RC);
2249 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2250 TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
2252 updateValueMap(I, ResultReg);
2256 bool X86FastISel::X86FastEmitPseudoSelect(
MVT RetVT,
const Instruction *I) {
2261 default:
return false;
2262 case MVT::i8: Opc = X86::CMOV_GR8;
break;
2263 case MVT::i16: Opc = X86::CMOV_GR16;
break;
2264 case MVT::i32: Opc = X86::CMOV_GR32;
break;
2265 case MVT::f32: Opc = X86::CMOV_FR32;
break;
2266 case MVT::f64: Opc = X86::CMOV_FR64;
break;
2288 EVT CmpVT = TLI.getValueType(DL, CmpLHS->
getType());
2289 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->
getDebugLoc()))
2292 unsigned CondReg = getRegForValue(Cond);
2295 bool CondIsKill = hasTrivialKill(Cond);
2298 if (
MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2299 unsigned KCondReg = CondReg;
2300 CondReg = createResultReg(&X86::GR8RegClass);
2301 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2302 TII.get(TargetOpcode::COPY), CondReg)
2305 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::TEST8ri))
2313 unsigned LHSReg = getRegForValue(LHS);
2314 bool LHSIsKill = hasTrivialKill(LHS);
2316 unsigned RHSReg = getRegForValue(RHS);
2317 bool RHSIsKill = hasTrivialKill(RHS);
2319 if (!LHSReg || !RHSReg)
2324 unsigned ResultReg =
2325 fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
2326 updateValueMap(I, ResultReg);
2330 bool X86FastISel::X86SelectSelect(
const Instruction *I) {
2332 if (!isTypeLegal(I->
getType(), RetVT))
2336 if (
const auto *CI = dyn_cast<CmpInst>(I->
getOperand(0))) {
2338 const Value *Opnd =
nullptr;
2339 switch (Predicate) {
2346 unsigned OpReg = getRegForValue(Opnd);
2349 bool OpIsKill = hasTrivialKill(Opnd);
2351 unsigned ResultReg = createResultReg(RC);
2352 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2353 TII.get(TargetOpcode::COPY), ResultReg)
2355 updateValueMap(I, ResultReg);
2361 if (X86FastEmitCMoveSelect(RetVT, I))
2365 if (X86FastEmitSSESelect(RetVT, I))
2370 if (X86FastEmitPseudoSelect(RetVT, I))
2376 bool X86FastISel::X86SelectSIToFP(
const Instruction *I) {
2380 if (!Subtarget->hasAVX())
2387 unsigned OpReg = getRegForValue(I->
getOperand(0));
2396 Opcode = X86::VCVTSI2SDrr;
2397 RC = &X86::FR64RegClass;
2400 Opcode = X86::VCVTSI2SSrr;
2401 RC = &X86::FR32RegClass;
2405 unsigned ImplicitDefReg = createResultReg(RC);
2406 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2407 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2408 unsigned ResultReg =
2409 fastEmitInst_rr(Opcode, RC, ImplicitDefReg,
true, OpReg,
false);
2410 updateValueMap(I, ResultReg);
2415 bool X86FastISel::X86SelectFPExtOrFPTrunc(
const Instruction *I,
2419 I->
getOpcode() == Instruction::FPTrunc) &&
2420 "Instruction must be an FPExt or FPTrunc!");
2422 unsigned OpReg = getRegForValue(I->
getOperand(0));
2426 unsigned ResultReg = createResultReg(RC);
2428 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpc),
2430 if (Subtarget->hasAVX())
2433 updateValueMap(I, ResultReg);
2437 bool X86FastISel::X86SelectFPExt(
const Instruction *I) {
2441 unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2442 return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR64RegClass);
2448 bool X86FastISel::X86SelectFPTrunc(
const Instruction *I) {
2452 unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2453 return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR32RegClass);
2459 bool X86FastISel::X86SelectTrunc(
const Instruction *I) {
2461 EVT DstVT = TLI.getValueType(DL, I->
getType());
2466 if (!TLI.isTypeLegal(SrcVT))
2469 unsigned InputReg = getRegForValue(I->
getOperand(0));
2476 updateValueMap(I, InputReg);
2480 bool KillInputReg =
false;
2481 if (!Subtarget->is64Bit()) {
2485 (SrcVT ==
MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;
2486 unsigned CopyReg = createResultReg(CopyRC);
2487 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2488 TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);
2490 KillInputReg =
true;
2494 unsigned ResultReg = fastEmitInst_extractsubreg(
MVT::i8,
2495 InputReg, KillInputReg,
2500 updateValueMap(I, ResultReg);
2504 bool X86FastISel::IsMemcpySmall(uint64_t Len) {
2505 return Len <= (Subtarget->is64Bit() ? 32 : 16);
2512 if (!IsMemcpySmall(Len))
2515 bool i64Legal = Subtarget->is64Bit();
2520 if (Len >= 8 && i64Legal)
2530 bool RV = X86FastEmitLoad(VT, SrcAM,
nullptr, Reg);
2531 RV &= X86FastEmitStore(VT, Reg,
true, DestAM);
2532 assert(RV &&
"Failed to emit load or store??");
2536 DestAM.
Disp += Size;
2543 bool X86FastISel::fastLowerIntrinsicCall(
const IntrinsicInst *II) {
2546 default:
return false;
2547 case Intrinsic::convert_from_fp16:
2548 case Intrinsic::convert_to_fp16: {
2549 if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
2553 unsigned InputReg = getRegForValue(Op);
2558 bool IsFloatToHalf = II->
getIntrinsicID() == Intrinsic::convert_to_fp16;
2559 if (IsFloatToHalf) {
2567 unsigned ResultReg = 0;
2569 if (IsFloatToHalf) {
2577 InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg,
false, 4);
2580 ResultReg = createResultReg(&X86::GR32RegClass);
2581 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2582 TII.get(X86::VMOVPDI2DIrr), ResultReg)
2586 unsigned RegIdx = X86::sub_16bit;
2587 ResultReg = fastEmitInst_extractsubreg(
MVT::i16, ResultReg,
true, RegIdx);
2598 InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg,
true);
2602 ResultReg = createResultReg(&X86::FR32RegClass);
2603 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2604 TII.get(TargetOpcode::COPY), ResultReg)
2608 updateValueMap(II, ResultReg);
2611 case Intrinsic::frameaddress: {
2619 if (!isTypeLegal(RetTy, VT))
2627 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass;
break;
2628 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass;
break;
2638 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
2639 (FrameReg == X86::EBP && VT ==
MVT::i32)) &&
2640 "Invalid Frame Register!");
2645 unsigned SrcReg = createResultReg(RC);
2646 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2647 TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
2655 unsigned Depth = cast<ConstantInt>(II->
getOperand(0))->getZExtValue();
2657 DestReg = createResultReg(RC);
2659 TII.get(Opc), DestReg), SrcReg);
2663 updateValueMap(II, SrcReg);
2666 case Intrinsic::memcpy: {
2667 const MemCpyInst *MCI = cast<MemCpyInst>(II);
2672 if (isa<ConstantInt>(MCI->
getLength())) {
2675 uint64_t Len = cast<ConstantInt>(MCI->
getLength())->getZExtValue();
2676 if (IsMemcpySmall(Len)) {
2678 if (!X86SelectAddress(MCI->
getRawDest(), DestAM) ||
2681 TryEmitSmallMemcpy(DestAM, SrcAM, Len);
2686 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2695 case Intrinsic::memset: {
2696 const MemSetInst *MSI = cast<MemSetInst>(II);
2701 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2710 case Intrinsic::stackprotector: {
2712 EVT PtrTy = TLI.getPointerTy(DL);
2721 if (!X86SelectAddress(Slot, AM))
return false;
2722 if (!X86FastEmitStore(PtrTy, Op1, AM))
return false;
2725 case Intrinsic::dbg_declare: {
2735 "Expected inlined-at fields to agree");
2742 case Intrinsic::trap: {
2746 case Intrinsic::sqrt: {
2747 if (!Subtarget->hasSSE1())
2750 Type *RetTy = II->getCalledFunction()->getReturnType();
2753 if (!isTypeLegal(RetTy, VT))
2759 static const uint16_t SqrtOpc[2][2] = {
2760 {X86::SQRTSSr, X86::VSQRTSSr},
2761 {X86::SQRTSDr, X86::VSQRTSDr}
2763 bool HasAVX = Subtarget->hasAVX();
2767 default:
return false;
2768 case MVT::f32: Opc = SqrtOpc[0][HasAVX]; RC = &X86::FR32RegClass;
break;
2769 case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass;
break;
2772 const Value *SrcVal = II->getArgOperand(0);
2773 unsigned SrcReg = getRegForValue(SrcVal);
2778 unsigned ImplicitDefReg = 0;
2780 ImplicitDefReg = createResultReg(RC);
2781 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2782 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2785 unsigned ResultReg = createResultReg(RC);
2787 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc),
2791 MIB.
addReg(ImplicitDefReg);
2795 updateValueMap(II, ResultReg);
2798 case Intrinsic::sadd_with_overflow:
2799 case Intrinsic::uadd_with_overflow:
2800 case Intrinsic::ssub_with_overflow:
2801 case Intrinsic::usub_with_overflow:
2802 case Intrinsic::smul_with_overflow:
2803 case Intrinsic::umul_with_overflow: {
2806 const Function *Callee = II->getCalledFunction();
2808 Type *RetTy = Ty->getTypeAtIndex(0U);
2811 "Overflow value expected to be an i1");
2814 if (!isTypeLegal(RetTy, VT))
2817 if (VT < MVT::i8 || VT > MVT::i64)
2820 const Value *LHS = II->getArgOperand(0);
2821 const Value *RHS = II->getArgOperand(1);
2824 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
2825 isCommutativeIntrinsic(II))
2828 bool UseIncDec =
false;
2829 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
2832 unsigned BaseOpc, CondOpc;
2833 switch (II->getIntrinsicID()) {
2835 case Intrinsic::sadd_with_overflow:
2837 CondOpc = X86::SETOr;
2839 case Intrinsic::uadd_with_overflow:
2840 BaseOpc =
ISD::ADD; CondOpc = X86::SETBr;
break;
2841 case Intrinsic::ssub_with_overflow:
2842 BaseOpc = UseIncDec ? unsigned(
X86ISD::DEC) : unsigned(ISD::
SUB);
2843 CondOpc = X86::SETOr;
2845 case Intrinsic::usub_with_overflow:
2846 BaseOpc =
ISD::SUB; CondOpc = X86::SETBr;
break;
2847 case Intrinsic::smul_with_overflow:
2849 case Intrinsic::umul_with_overflow:
2853 unsigned LHSReg = getRegForValue(LHS);
2856 bool LHSIsKill = hasTrivialKill(LHS);
2858 unsigned ResultReg = 0;
2860 if (
const auto *CI = dyn_cast<ConstantInt>(RHS)) {
2861 static const uint16_t Opc[2][4] = {
2862 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
2863 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
2867 ResultReg = createResultReg(TLI.getRegClassFor(VT));
2869 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2870 TII.get(Opc[IsDec][VT.SimpleTy-
MVT::i8]), ResultReg)
2873 ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
2874 CI->getZExtValue());
2880 RHSReg = getRegForValue(RHS);
2883 RHSIsKill = hasTrivialKill(RHS);
2884 ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
2891 static const uint16_t MULOpc[] =
2892 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
2893 static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
2896 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2897 TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-
MVT::i8])
2899 ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-
MVT::i8],
2900 TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
2902 static const uint16_t MULOpc[] =
2903 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
2907 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2908 TII.get(TargetOpcode::COPY), X86::AL)
2910 ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
2913 ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-
MVT::i8],
2914 TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
2922 unsigned ResultReg2 = createResultReg(&X86::GR8RegClass);
2923 assert((ResultReg+1) == ResultReg2 &&
"Nonconsecutive result registers.");
2924 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(CondOpc),
2927 updateValueMap(II, ResultReg, 2);
2930 case Intrinsic::x86_sse_cvttss2si:
2931 case Intrinsic::x86_sse_cvttss2si64:
2932 case Intrinsic::x86_sse2_cvttsd2si:
2933 case Intrinsic::x86_sse2_cvttsd2si64: {
2935 switch (II->getIntrinsicID()) {
2937 case Intrinsic::x86_sse_cvttss2si:
2938 case Intrinsic::x86_sse_cvttss2si64:
2939 if (!Subtarget->hasSSE1())
2941 IsInputDouble =
false;
2943 case Intrinsic::x86_sse2_cvttsd2si:
2944 case Intrinsic::x86_sse2_cvttsd2si64:
2945 if (!Subtarget->hasSSE2())
2947 IsInputDouble =
true;
2951 Type *RetTy = II->getCalledFunction()->getReturnType();
2953 if (!isTypeLegal(RetTy, VT))
2956 static const uint16_t CvtOpc[2][2][2] = {
2957 { { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },
2958 { X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
2959 { { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },
2960 { X86::CVTTSD2SI64rr, X86::VCVTTSD2SI64rr } }
2962 bool HasAVX = Subtarget->hasAVX();
2966 case MVT::i32: Opc = CvtOpc[IsInputDouble][0][HasAVX];
break;
2967 case MVT::i64: Opc = CvtOpc[IsInputDouble][1][HasAVX];
break;
2971 const Value *Op = II->getArgOperand(0);
2972 while (
auto *
IE = dyn_cast<InsertElementInst>(Op)) {
2973 const Value *Index =
IE->getOperand(2);
2974 if (!isa<ConstantInt>(Index))
2976 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
2979 Op =
IE->getOperand(1);
2982 Op =
IE->getOperand(0);
2985 unsigned Reg = getRegForValue(Op);
2989 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
2990 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
2993 updateValueMap(II, ResultReg);
2999 bool X86FastISel::fastLowerArguments() {
3000 if (!FuncInfo.CanLowerReturn)
3011 if (Subtarget->isCallingConvWin64(CC))
3014 if (!Subtarget->is64Bit())
3018 unsigned GPRCnt = 0;
3019 unsigned FPRCnt = 0;
3021 for (
auto const &Arg : F->
args()) {
3032 Type *ArgTy = Arg.getType();
3036 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3037 if (!ArgVT.
isSimple())
return false;
3039 default:
return false;
3046 if (!Subtarget->hasSSE1())
3059 static const MCPhysReg GPR32ArgRegs[] = {
3060 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
3062 static const MCPhysReg GPR64ArgRegs[] = {
3063 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
3066 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3067 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3070 unsigned GPRIdx = 0;
3071 unsigned FPRIdx = 0;
3072 for (
auto const &Arg : F->
args()) {
3073 MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
3078 case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++];
break;
3079 case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++];
break;
3081 case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++];
break;
3083 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3087 unsigned ResultReg = createResultReg(RC);
3088 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3089 TII.get(TargetOpcode::COPY), ResultReg)
3091 updateValueMap(&Arg, ResultReg);
3115 bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
3116 auto &OutVals = CLI.OutVals;
3117 auto &OutFlags = CLI.OutFlags;
3118 auto &OutRegs = CLI.OutRegs;
3119 auto &
Ins = CLI.Ins;
3120 auto &InRegs = CLI.InRegs;
3122 bool &IsTailCall = CLI.IsTailCall;
3123 bool IsVarArg = CLI.IsVarArg;
3124 const Value *Callee = CLI.Callee;
3127 bool Is64Bit = Subtarget->is64Bit();
3128 bool IsWin64 = Subtarget->isCallingConvWin64(CC);
3132 default:
return false;
3156 if (IsVarArg && IsWin64)
3160 if (CLI.CS && CLI.CS->hasInAllocaArgument())
3163 for (
auto Flag : CLI.OutFlags)
3164 if (
Flag.isSwiftError())
3173 for (
int i = 0, e = OutVals.size();
i != e; ++
i) {
3174 Value *&Val = OutVals[
i];
3176 if (
auto *CI = dyn_cast<ConstantInt>(Val)) {
3177 if (CI->getBitWidth() < 32) {
3190 if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
3191 (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
3193 Value *PrevVal = TI->getOperand(0);
3194 ResultReg = getRegForValue(PrevVal);
3199 if (!isTypeLegal(PrevVal->
getType(), VT))
3203 fastEmit_ri(VT, VT,
ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
3205 if (!isTypeLegal(Val->
getType(), VT))
3207 ResultReg = getRegForValue(Val);
3219 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
3225 CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
3228 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3231 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
3232 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackDown))
3233 .addImm(NumBytes).
addImm(0);
3237 for (
unsigned i = 0, e = ArgLocs.
size();
i != e; ++
i) {
3245 unsigned ArgReg = ArgRegs[VA.
getValNo()];
3252 "Unexpected extend");
3259 assert(Emitted &&
"Failed to emit a sext!"); (void)Emitted;
3265 "Unexpected extend");
3270 ArgReg = fastEmitZExtFromI1(
MVT::i8, ArgReg,
false);
3279 assert(Emitted &&
"Failed to emit a zext!"); (void)Emitted;
3285 "Unexpected extend");
3295 assert(Emitted &&
"Failed to emit a aext!"); (void)Emitted;
3302 assert(ArgReg &&
"Failed to emit a bitcast!");
3322 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3323 TII.get(TargetOpcode::COPY), VA.
getLocReg()).addReg(ArgReg);
3329 if (isa<UndefValue>(ArgVal))
3335 AM.
Disp = LocMemOffset;
3337 unsigned Alignment =
DL.getABITypeAlignment(ArgVal->
getType());
3344 if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.
getByValSize()))
3346 }
else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
3350 if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
3353 bool ValIsKill = hasTrivialKill(ArgVal);
3354 if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
3362 if (Subtarget->isPICStyleGOT()) {
3363 unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3364 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3365 TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
3368 if (Is64Bit && IsVarArg && !IsWin64) {
3379 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3380 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3382 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3383 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3384 &&
"SSE registers cannot be used when SSE is disabled");
3385 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV8ri),
3386 X86::AL).addImm(NumXMMRegs);
3392 if (!X86SelectCallAddress(Callee, CalleeAM))
3395 unsigned CalleeOp = 0;
3397 if (CalleeAM.
GV !=
nullptr) {
3399 }
else if (CalleeAM.
Base.
Reg != 0) {
3408 unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
3409 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(CallOpc))
3413 assert(GV &&
"Not a direct call");
3414 unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
3417 unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
3422 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(CallOpc));
3424 MIB.
addSym(Symbol, OpFlags);
3431 MIB.
addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
3434 if (Subtarget->isPICStyleGOT())
3437 if (Is64Bit && IsVarArg && !IsWin64)
3441 for (
auto Reg : OutRegs)
3445 unsigned NumBytesForCalleeToPop =
3447 TM.Options.GuaranteedTailCallOpt)
3450 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
3451 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackUp))
3452 .addImm(NumBytes).
addImm(NumBytesForCalleeToPop);
3456 CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
3457 CLI.RetTy->getContext());
3461 unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
3462 for (
unsigned i = 0;
i != RVLocs.
size(); ++
i) {
3465 unsigned CopyReg = ResultReg +
i;
3469 ((Is64Bit ||
Ins[i].Flags.
isInReg()) && !Subtarget->hasSSE1())) {
3476 isScalarFPTypeInSSEReg(VA.
getValVT())) {
3478 CopyReg = createResultReg(&X86::RFP80RegClass);
3482 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3483 TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.
getLocReg());
3491 unsigned Opc = ResVT ==
MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
3497 Opc = ResVT ==
MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
3499 TII.get(Opc), ResultReg +
i), FI);
3503 CLI.ResultReg = ResultReg;
3504 CLI.NumResultRegs = RVLocs.
size();
3511 X86FastISel::fastSelectInstruction(
const Instruction *I) {
3515 return X86SelectLoad(I);
3517 return X86SelectStore(I);
3519 return X86SelectRet(I);
3520 case Instruction::ICmp:
3521 case Instruction::FCmp:
3522 return X86SelectCmp(I);
3523 case Instruction::ZExt:
3524 return X86SelectZExt(I);
3525 case Instruction::Br:
3526 return X86SelectBranch(I);
3527 case Instruction::LShr:
3528 case Instruction::AShr:
3529 case Instruction::Shl:
3530 return X86SelectShift(I);
3531 case Instruction::SDiv:
3532 case Instruction::UDiv:
3533 case Instruction::SRem:
3534 case Instruction::URem:
3535 return X86SelectDivRem(I);
3537 return X86SelectSelect(I);
3538 case Instruction::Trunc:
3539 return X86SelectTrunc(I);
3540 case Instruction::FPExt:
3541 return X86SelectFPExt(I);
3542 case Instruction::FPTrunc:
3543 return X86SelectFPTrunc(I);
3544 case Instruction::SIToFP:
3545 return X86SelectSIToFP(I);
3546 case Instruction::IntToPtr:
3547 case Instruction::PtrToInt: {
3549 EVT DstVT = TLI.getValueType(DL, I->
getType());
3551 return X86SelectZExt(I);
3553 return X86SelectTrunc(I);
3554 unsigned Reg = getRegForValue(I->
getOperand(0));
3555 if (Reg == 0)
return false;
3556 updateValueMap(I, Reg);
3559 case Instruction::BitCast: {
3561 if (!Subtarget->hasSSE2())
3565 EVT DstVT = TLI.getValueType(DL, I->
getType());
3580 unsigned Reg = getRegForValue(I->
getOperand(0));
3586 updateValueMap(I, Reg);
3594 unsigned X86FastISel::X86MaterializeInt(
const ConstantInt *CI,
MVT VT) {
3600 unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
3605 return fastEmitInst_extractsubreg(
MVT::i8, SrcReg,
true,
3608 return fastEmitInst_extractsubreg(
MVT::i16, SrcReg,
true,
3613 unsigned ResultReg = createResultReg(&X86::GR64RegClass);
3614 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3615 TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
3626 case MVT::i8: Opc = X86::MOV8ri;
break;
3627 case MVT::i16: Opc = X86::MOV16ri;
break;
3628 case MVT::i32: Opc = X86::MOV32ri;
break;
3633 Opc = X86::MOV64ri32;
3639 if (VT == MVT::i64 && Opc == X86::MOV32ri) {
3640 unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
3641 unsigned ResultReg = createResultReg(&X86::GR64RegClass);
3642 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3643 TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
3647 return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
3650 unsigned X86FastISel::X86MaterializeFP(
const ConstantFP *CFP,
MVT VT) {
3652 return fastMaterializeFloatZero(CFP);
3665 if (X86ScalarSSEf32) {
3666 Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
3667 RC = &X86::FR32RegClass;
3669 Opc = X86::LD_Fp32m;
3670 RC = &X86::RFP32RegClass;
3674 if (X86ScalarSSEf64) {
3675 Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
3676 RC = &X86::FR64RegClass;
3678 Opc = X86::LD_Fp64m;
3679 RC = &X86::RFP64RegClass;
3688 unsigned Align =
DL.getPrefTypeAlignment(CFP->
getType());
3691 Align =
DL.getTypeAllocSize(CFP->
getType());
3695 unsigned PICBase = 0;
3696 unsigned char OpFlag = Subtarget->classifyLocalReference(
nullptr);
3698 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3700 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3705 unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
3706 unsigned ResultReg = createResultReg(RC);
3709 unsigned AddrReg = createResultReg(&X86::GR64RegClass);
3710 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV64ri),
3712 .addConstantPoolIndex(CPI, 0, OpFlag);
3714 TII.get(Opc), ResultReg);
3724 TII.get(Opc), ResultReg),
3725 CPI, PICBase, OpFlag);
3729 unsigned X86FastISel::X86MaterializeGV(
const GlobalValue *GV,
MVT VT) {
3736 if (X86SelectAddress(GV, AM)) {
3743 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
3745 TLI.getPointerTy(DL) ==
MVT::i64) {
3748 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV64ri),
3750 .addGlobalAddress(GV);
3754 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3757 TII.get(Opc), ResultReg), AM);
3764 unsigned X86FastISel::fastMaterializeConstant(
const Constant *
C) {
3765 EVT CEVT = TLI.getValueType(DL, C->
getType(),
true);
3772 if (
const auto *CI = dyn_cast<ConstantInt>(C))
3773 return X86MaterializeInt(CI, VT);
3774 else if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
3775 return X86MaterializeFP(CFP, VT);
3776 else if (
const GlobalValue *GV = dyn_cast<GlobalValue>(C))
3777 return X86MaterializeGV(GV, VT);
3782 unsigned X86FastISel::fastMaterializeAlloca(
const AllocaInst *C) {
3790 if (!FuncInfo.StaticAllocaMap.count(C))
3795 if (!X86SelectAddress(C, AM))
3799 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3802 unsigned ResultReg = createResultReg(RC);
3804 TII.get(Opc), ResultReg), AM);
3808 unsigned X86FastISel::fastMaterializeFloatZero(
const ConstantFP *CF) {
3810 if (!isTypeLegal(CF->
getType(), VT))
3819 if (X86ScalarSSEf32) {
3820 Opc = X86::FsFLD0SS;
3821 RC = &X86::FR32RegClass;
3823 Opc = X86::LD_Fp032;
3824 RC = &X86::RFP32RegClass;
3828 if (X86ScalarSSEf64) {
3829 Opc = X86::FsFLD0SD;
3830 RC = &X86::FR64RegClass;
3832 Opc = X86::LD_Fp064;
3833 RC = &X86::RFP64RegClass;
3841 unsigned ResultReg = createResultReg(RC);
3842 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
3847 bool X86FastISel::tryToFoldLoadIntoMI(
MachineInstr *
MI,
unsigned OpNo,
3851 if (!X86SelectAddress(Ptr, AM))
3856 unsigned Size =
DL.getTypeAllocSize(LI->
getType());
3860 Alignment =
DL.getABITypeAlignment(LI->
getType());
3866 *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
3876 unsigned OperandNo = 0;
3885 if (IndexReg == MO.
getReg())
3890 Result->
addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
3895 unsigned X86FastISel::fastEmitInst_rrrr(
unsigned MachineInstOpcode,
3897 unsigned Op0,
bool Op0IsKill,
3898 unsigned Op1,
bool Op1IsKill,
3899 unsigned Op2,
bool Op2IsKill,
3900 unsigned Op3,
bool Op3IsKill) {
3903 unsigned ResultReg = createResultReg(RC);
3910 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
3916 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
3921 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3931 return new X86FastISel(funcInfo, libInfo);