#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);
    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }
    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }
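// A minimal sketch of what the loop above computes, assuming a little-endian
// target: folding <2 x i16> <i16 0x1111, i16 0x2222> to i32 visits the
// highest-addressed element first, so Result becomes
// (0x2222 << 16) | 0x1111 == 0x22221111; big-endian walks in source order.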
106 "Invalid constantexpr bitcast!");
112 if (
auto *VTy = dyn_cast<VectorType>(
C->getType())) {
115 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116 Type *SrcEltTy = VTy->getElementType();
129 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
130 SrcEltTy, NumSrcElts,
DL))
133 if (isa<IntegerType>(DestTy))
134 return ConstantInt::get(DestTy, Result);
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();
  if (!isa<ConstantVector>(C) &&
      !isa<ConstantDataVector>(C))
  bool isLittleEndian = DL.isLittleEndian();
  if (NumDstElt < NumSrcElt) {
    unsigned Ratio = NumSrcElt/NumDstElt;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        assert(Src && "Constant folding cannot fail on plain integers");
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
      }
      assert(Elt && "Constant folding cannot fail on plain integers");
    unsigned Ratio = NumDstElt/NumSrcElt;
    unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
    for (unsigned i = 0; i != NumSrcElt; ++i) {
      auto *Element = C->getAggregateElement(i);
      if (isa<UndefValue>(Element)) {
      auto *Src = dyn_cast<ConstantInt>(Element);
      unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        APInt Elt = Src->getValue().lshr(ShiftAmt);
        ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
        Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
      }
    }
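// The split direction is the mirror image: bitcast <1 x i32> <0x04030201>
// to <4 x i8> peels DstBitSize-wide chunks off each source element with
// lshr/trunc, emitting 0x01, 0x02, 0x03, 0x04 in turn on little-endian.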
  if ((GV = dyn_cast<GlobalValue>(C))) {
  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return false;
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
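// Taken together, this helper (IsConstantOffsetFromGlobal in upstream LLVM)
// peels ptrtoint/bitcast wrappers and constant GEPs off a constant until it
// either bottoms out at a GlobalValue plus an APInt byte offset or gives up.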
  Type *SrcTy = C->getType();
  TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
  TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
  if (!TypeSize::isKnownGE(SrcSize, DestSize))
  if (SrcSize == DestSize &&
    Cast = Instruction::IntToPtr;
    Cast = Instruction::PtrToInt;
    ElemC = C->getAggregateElement(Elem++);
  } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
  if (auto *VT = dyn_cast<VectorType>(SrcTy))
    if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
  C = C->getAggregateElement(0u);
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);
    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    ByteOffset -= CurEltOffset;
      uint64_t EltSize =
          DL.getTypeAllocSize(CS->getOperand(Index)->getType());
      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
      if (Index == CS->getType()->getNumElements())
      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      CurEltOffset = NextEltOffset;
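// ReadDataFromGlobal serializes a constant into raw bytes. For structs, the
// walk above keeps three cursors in sync: ByteOffset (position within the
// current field), CurPtr/BytesLeft (the output window), and CurEltOffset
// (the field's start per the struct layout), skipping over inter-field
// padding, which stays zero because the callers pre-zero the buffer.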
  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;
      EltSize = DL.getTypeStoreSize(EltTy);
    }
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
  if (isa<ScalableVectorType>(LoadTy))
  auto *IntType = dyn_cast<IntegerType>(LoadTy);
      DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
    if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;
  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
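// Sketch of the byte assembly above: with RawBytes = {0x78, 0x56, 0x34, 0x12}
// and a 32-bit load on a little-endian target, the loop starts from the
// highest-addressed byte (0x12) and shifts in 0x34, 0x56, 0x78, producing
// the APInt 0x12345678; big-endian simply walks the buffer front to back.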
  if (NBytes > UINT16_MAX)
  unsigned char *CurPtr = RawBytes.data();
  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
  if (!Offset.isZero() || !Indices[0].isZero())
    if (Index.isNegative() || Index.getActiveBits() >= 32)
    C = C->getAggregateElement(Index.getZExtValue());

  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
  if (isa<PoisonValue>(C))
  if (isa<UndefValue>(C))
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
  if (C->isAllOnesValue() &&

  if (Opc == Instruction::And) {
    if ((Known1.One | Known0.Zero).isAllOnes()) {
    if ((Known0.One | Known1.Zero).isAllOnes()) {
  if (Opc == Instruction::Sub) {
    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
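// The two Known-bits tests above implement a classic And simplification:
// if every bit position is either known one in Op1 or known zero in Op0,
// then Op0 & Op1 must equal Op0 (and symmetrically for Op1), so the And
// folds away without fully evaluating either operand.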
  std::optional<ConstantRange> InRange,
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
        SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
  Type *SrcElemTy = GEP->getSourceElementType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
  if (!Ptr->getType()->isPointerTy())
  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
      DL.getIndexedOffsetInType(
  std::optional<ConstantRange> InRange = GEP->getInRange();
  bool Overflow = false;
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    NW &= GEP->getNoWrapFlags();
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
  bool CanBeNull, CanBeFreed;
      Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
      ConstantInt::get(Ctx, Offset), NW,
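// High-level shape of SymbolicallyEvaluateGEP: all-constant indices are
// folded into a single byte offset, nested GEPs are peeled off the base
// pointer (intersecting their no-wrap flags via NW &=), and the result is
// re-emitted as one canonical GEP of the accumulated offset when that is
// provably safe for the pointer's address space.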
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       bool AllowNonDeterministic) {
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
      AllowNonDeterministic);
  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
        GEP->getNoWrapFlags(),
  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
  case Instruction::Freeze:
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      AllowNonDeterministic);
  case Instruction::Select:
  case Instruction::ExtractElement:
  case Instruction::ExtractValue:
      Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
  case Instruction::InsertValue:
      Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
      Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI, true))
  assert(isa<ConstantVector>(C));
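// ConstantFoldConstantImpl recurses through ConstantExpr/ConstantVector
// operand trees; the FoldedOps DenseMap memoizes already-folded constants
// so shared subexpressions are folded once rather than once per use.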
  if (auto *PN = dyn_cast<PHINode>(I)) {
    C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
    if (CommonValue && C != CommonValue)

  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))

  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);

  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);

                                  bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
    if (CE0->getOpcode() == Instruction::PtrToInt) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
      if (CE0->getType() == IntPtrTy) {
    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);
    APInt Offset1(IndexWidth, 0);
    if (Stripped0 == Stripped1)
  } else if (isa<ConstantExpr>(Ops1)) {
    Predicate = ICmpInst::getSwappedPredicate(Predicate);

  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
  if (!I || !I->getParent() || !I->getFunction())

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  return ConstantFP::get(

                                    bool AllowNonDeterministic) {
  if (!AllowNonDeterministic)
    if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
      if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
          FP->hasAllowContract() || FP->hasAllowReciprocal())

  if (!AllowNonDeterministic && C->isNaN())
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
          DL.getIntPtrType(CE->getType()),
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        if (GEP->getNumIndices() == 1 &&
            GEP->getSourceElementType()->isIntegerTy(8)) {
          auto *Ptr = cast<Constant>(GEP->getPointerOperand());
          auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
          Type *IntIdxTy = DL.getIndexType(Ptr->getType());
          if (Sub && Sub->getType() == IntIdxTy &&
              Sub->getOpcode() == Instruction::Sub &&
              Sub->getOperand(0)->isNullValue())
  case Instruction::IntToPtr:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
        if (MidIntSize >= SrcPtrSize) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
  if (Call->isNoBuiltin())
  if (Call->getFunctionType() != F->getFunctionType())
  switch (F->getIntrinsicID()) {
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::scmp:
  case Intrinsic::ucmp:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
  case Intrinsic::amdgcn_s_wqm:
  case Intrinsic::amdgcn_s_quadmask:
  case Intrinsic::amdgcn_s_bitreplicate:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::exp10:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::ldexp:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::frexp:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_sin:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
  if (!F->hasName() || Call->isStrictFP())

  return Name == "acos" || Name == "acosf" ||
         Name == "asin" || Name == "asinf" ||
         Name == "atan" || Name == "atanf" ||
         Name == "atan2" || Name == "atan2f";
  return Name == "ceil" || Name == "ceilf" ||
  return Name == "exp" || Name == "expf" ||
  return Name == "fabs" || Name == "fabsf" ||
         Name == "floor" || Name == "floorf" ||
  return Name == "log" || Name == "logf" || Name == "log2" ||
         Name == "log2f" || Name == "log10" || Name == "log10f" ||
  return Name == "nearbyint" || Name == "nearbyintf";
  return Name == "pow" || Name == "powf";
  return Name == "remainder" || Name == "remainderf" ||
         Name == "rint" || Name == "rintf" ||
         Name == "round" || Name == "roundf";
  return Name == "sin" || Name == "sinf" ||
         Name == "sinh" || Name == "sinhf" ||
  return Name == "tan" || Name == "tanf" ||
         Name == "tanh" || Name == "tanhf" ||
         Name == "trunc" || Name == "truncf";
  if (Name.size() < 12 || Name[1] != '_')
  return Name == "__acos_finite" || Name == "__acosf_finite" ||
         Name == "__asin_finite" || Name == "__asinf_finite" ||
         Name == "__atan2_finite" || Name == "__atan2f_finite";
  return Name == "__cosh_finite" || Name == "__coshf_finite";
  return Name == "__exp_finite" || Name == "__expf_finite" ||
         Name == "__exp2_finite" || Name == "__exp2f_finite";
  return Name == "__log_finite" || Name == "__logf_finite" ||
         Name == "__log10_finite" || Name == "__log10f_finite";
  return Name == "__pow_finite" || Name == "__powf_finite";
  return Name == "__sinh_finite" || Name == "__sinhf_finite";
  APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
  return ConstantFP::get(Ty->getContext(), APF);

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
  return ConstantFP::get(Ty, V);

inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);

inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))

  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
  return GetConstantFoldFPValue(Result, Ty);

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *ConstantFoldFP128(long double (*NativeFP)(long double),
  llvm_fenv_clearexcept();
  float128 Result = NativeFP(V.convertToQuad());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
  return GetConstantFoldFPValue128(Result, Ty);

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
  return GetConstantFoldFPValue(Result, Ty);
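// The fold-via-host-libm pattern above is deliberate: clear errno and the FP
// exception flags, call the host's math function, then reject the fold if
// ERANGE/EDOM or any exception other than FE_INEXACT fired, so the compiler
// never bakes in a result the target's libm would have reported as an error.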
  if (isa<ConstantAggregateZero>(Op))
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  APInt Acc = EltC->getValue();
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
    const APInt &X = EltC->getValue();
    switch (IntrinsicID) {
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
  return ConstantInt::get(Op->getContext(), Acc);
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");
  bool isExact = false;
                           : APFloat::rmNearestTiesToEven;
      IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
  return ConstantInt::get(Ty, UIntVal, IsSigned);

  Type *Ty = Op->getType();
    return Op->getValueAPF().convertToDouble();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
  if (isa<UndefValue>(Op)) {

  if (St == APFloat::opStatus::opOK)
  if (ORM && *ORM == RoundingMode::Dynamic)
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)

  if (!ORM || *ORM == RoundingMode::Dynamic)
    return RoundingMode::NearestTiesToEven;

  return ConstantFP::get(

  if (Src.isNormal() || Src.isInfinity())
    return ConstantFP::get(CI->getContext(), Src);
  return ConstantFP::get(CI->getContext(), Src);
  if (IntrinsicID == Intrinsic::is_constant) {
    if (Operands[0]->isManifestConstant())

  if (isa<PoisonValue>(Operands[0])) {
    if (IntrinsicID == Intrinsic::canonicalize)

  if (isa<UndefValue>(Operands[0])) {
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)

  if (isa<ConstantPointerNull>(Operands[0])) {
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
        Call->getParent() ? Call->getCaller() : nullptr;

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
      bool IsExact = false;
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
        IntrinsicID == Intrinsic::fptoui_sat);
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
    if (IntrinsicID == Intrinsic::log) {
      float128 Result = logf128(Op->getValueAPF().convertToQuad());
      return GetConstantFoldFPValue128(Result, Ty);
        Fp128Func == LibFunc_logl)
      return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
    if (IntrinsicID == Intrinsic::fabs) {
    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(true);
    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;

    auto CI = cast<ConstrainedFPIntrinsic>(Call);
    if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
        St == APFloat::opInexact) {
      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
    } else if (U.isSignaling()) {
      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
    switch (IntrinsicID) {
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::exp10:
      return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      return GetConstantFoldFPValue(V, Ty);
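// amdgcn.sin/cos take an argument already scaled by 1/(2*pi), so an input
// that is an exact multiple of 0.25 lands on a quarter-period point; the
// SinVals table above is just sin at 0, pi/2, pi and 3*pi/2, with IsCos
// shifting the index by one quarter period instead of computing cos.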
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      return ConstantFoldFP(acos, APF, Ty);
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      return ConstantFoldFP(asin, APF, Ty);
      return ConstantFoldFP(atan, APF, Ty);
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFoldFP(cos, APF, Ty);
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      return ConstantFoldFP(cosh, APF, Ty);
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      return ConstantFoldFP(exp, APF, Ty);
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      if (TLI->has(Func)) {
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      return ConstantFoldFP(log, APF, Ty);
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      return ConstantFoldFP(log2, APF, Ty);
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      return ConstantFoldFP(log10, APF, Ty);
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFoldFP(sin, APF, Ty);
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      return ConstantFoldFP(sinh, APF, Ty);
      return ConstantFoldFP(sqrt, APF, Ty);
      return ConstantFoldFP(tan, APF, Ty);
      return ConstantFoldFP(tanh, APF, Ty);
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().popcount());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());
      assert(status != APFloat::opInexact && !lost &&
             "Precision lost during fp16 constfolding");
      return ConstantFP::get(Ty->getContext(), Val);
    case Intrinsic::amdgcn_s_wqm: {
      Val |= (Val & 0x5555555555555555ULL) << 1 |
             ((Val >> 1) & 0x5555555555555555ULL);
      Val |= (Val & 0x3333333333333333ULL) << 2 |
             ((Val >> 2) & 0x3333333333333333ULL);
      return ConstantInt::get(Ty, Val);
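// s_wqm computes a whole-quad-mode mask: within each group of 4 bits, if any
// bit is set, all 4 become set. The code smears bits onto their pair
// neighbor first (the 0x5555... masks), then each pair onto the other pair
// of the nibble (the 0x3333... masks); e.g. 0x14 becomes 0xFF because both
// nibbles are nonzero.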
    case Intrinsic::amdgcn_s_quadmask: {
      for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
        QuadMask |= (1ULL << I);
      return ConstantInt::get(Ty, QuadMask);

    case Intrinsic::amdgcn_s_bitreplicate: {
      Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
      Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
      Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
      Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
      Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
      Val = Val | Val << 1;
      return ConstantInt::get(Ty, Val);
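// s_bitreplicate doubles each of the 32 input bits into two adjacent output
// bits using the classic bit-interleave ladder: each masked shift spreads
// the halves further apart (by 16, 8, 4, 2, 1), and the final Val | Val << 1
// turns every isolated bit b into the pair bb, so 0b10 becomes 0b1100.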
  switch (IntrinsicID) {
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:

  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),

  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  if (FCmp->isSignaling()) {
      St = APFloat::opInvalidOp;
      St = APFloat::opInvalidOp;
  return ConstantInt::get(Call->getType()->getScalarType(), Result);
  const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
  const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
  const APFloat &Op1V = Op1->getValueAPF();
  const APFloat &Op2V = Op2->getValueAPF();
  case LibFunc_pow_finite:
  case LibFunc_powf_finite:
    return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
    if (TLI->has(Func)) {
      if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
  case LibFunc_remainder:
  case LibFunc_remainderf:
    if (TLI->has(Func)) {
      if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
  case LibFunc_atan2f:
  case LibFunc_atan2_finite:
  case LibFunc_atan2f_finite:
    return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
  bool IsOp0Undef = isa<UndefValue>(Operands[0]);
  bool IsOp1Undef = isa<UndefValue>(Operands[1]);
  switch (IntrinsicID) {
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr =
              dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
        switch (IntrinsicID) {
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
        case Intrinsic::experimental_constrained_fsub:
        case Intrinsic::experimental_constrained_fmul:
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
        case Intrinsic::experimental_constrained_frem:
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        return ConstantFP::get(Ty->getContext(), Res);

      switch (IntrinsicID) {
      case Intrinsic::copysign:
      case Intrinsic::minnum:
      case Intrinsic::maxnum:
      case Intrinsic::minimum:
      case Intrinsic::maximum:

      switch (IntrinsicID) {
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::ldexp: {
        return ConstantFP::get(
            Ty->getContext(),
            scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
      case Intrinsic::is_fpclass: {
        return ConstantInt::get(Ty, Result);

      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat(std::pow(Op1V.convertToDouble(),
                             (int)Op2C->getZExtValue())));
  const APInt *C0, *C1;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1))

  switch (IntrinsicID) {
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
    return ConstantInt::get(

  case Intrinsic::scmp:
  case Intrinsic::ucmp:
      return ConstantInt::get(Ty, 0);
    int Res;
    if (IntrinsicID == Intrinsic::scmp)
      Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
    else
      Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
    return ConstantInt::get(Ty, Res, /*IsSigned=*/true);

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
      cast<StructType>(Ty),
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    switch (IntrinsicID) {
    case Intrinsic::sadd_with_overflow:
      Res = C0->sadd_ov(*C1, Overflow);
    case Intrinsic::uadd_with_overflow:
      Res = C0->uadd_ov(*C1, Overflow);
    case Intrinsic::ssub_with_overflow:
      Res = C0->ssub_ov(*C1, Overflow);
    case Intrinsic::usub_with_overflow:
      Res = C0->usub_ov(*C1, Overflow);
    case Intrinsic::smul_with_overflow:
      Res = C0->smul_ov(*C1, Overflow);
    case Intrinsic::umul_with_overflow:
      Res = C0->umul_ov(*C1, Overflow);

  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    if (IntrinsicID == Intrinsic::uadd_sat)
      return ConstantInt::get(Ty, C0->uadd_sat(*C1));
    else
      return ConstantInt::get(Ty, C0->sadd_sat(*C1));
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    if (IntrinsicID == Intrinsic::usub_sat)
      return ConstantInt::get(Ty, C0->usub_sat(*C1));
    else
      return ConstantInt::get(Ty, C0->ssub_sat(*C1));
  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    assert(C1 && "Must be constant int");
    if (IntrinsicID == Intrinsic::cttz)
  case Intrinsic::abs:
    assert(C1 && "Must be constant int");
    return ConstantInt::get(Ty, C0->abs());
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
    return dyn_cast<Constant>(Operands[0]);
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    switch (IntrinsicID) {
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
  if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:

  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))

  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
      B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
      B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    Val.insertBits(B, I, 8);
  if (NumUndefBytes == 4)
  return ConstantInt::get(Ty, Val);
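// amdgcn.perm builds its 32-bit result one byte at a time: each selector
// byte in the third operand either picks one of the eight input bytes from
// the {C1, C0} pair or replicates a sign bit across the whole byte (the
// extractBitsAsZExtValue(1, ...) * 0xff path above).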
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          switch (IntrinsicID) {
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
          if (mayFoldConstrained(
            return ConstantFP::get(Ty->getContext(), Res);

        switch (IntrinsicID) {
        case Intrinsic::amdgcn_fma_legacy: {
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))

    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    assert(Scale < Width && "Illegal scale.");

    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
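// smul_fix multiplies two signed fixed-point numbers with Scale fraction
// bits: the operands are sign-extended to twice the width so the full
// product fits, then shifted right by Scale to drop the extra fraction
// bits; the _sat variant clamps rather than wrapping when the result does
// not fit back into Width bits.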
  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
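// Funnel-shift fold in a nutshell: fshl(a, b, s) is the high word of the
// 2*BitWidth concatenation a:b shifted left by s, i.e.
// (a << s) | (b >> (BitWidth - s)); e.g. fshl(i8 0x12, i8 0x34, 4) == 0x23.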
  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
      return FoldedLibCall;
    return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
static Constant *ConstantFoldFixedVectorCall(
  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
      auto *MaskElt = Mask->getAggregateElement(I);
      auto *PassthruElt = Passthru->getAggregateElement(I);
      if (isa<UndefValue>(MaskElt)) {
      if (MaskElt->isNullValue()) {
      } else if (MaskElt->isOneValue()) {
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      for (unsigned i = 0; i < Lanes; i++) {
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    uint64_t Limit = Op1->getZExtValue();
    for (unsigned i = 0; i < Lanes; i++) {
      if (Base + i < Limit)

  for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
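// Default fixed-vector strategy: when no intrinsic-specific handler applies,
// the folder gathers the J-th element of every operand into Lane and folds
// the call once per lane via ConstantFoldScalarCall, rebuilding the result
// vector from the per-lane constants (or bailing out if any lane fails).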
static Constant *ConstantFoldScalableVectorCall(
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())

static std::pair<Constant *, Constant *>
  if (isa<PoisonValue>(Op))
  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  const APFloat &U = ConstFP->getValueAPF();
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);
  Constant *Result1 = FrexpMant.isFinite() ? ConstantInt::get(IntTy, FrexpExp)
  return {Result0, Result1};
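// frexp folding mirrors the libm contract: a finite x is split into
// mantissa * 2^exp with the mantissa in [0.5, 1), returned as the two
// struct results; non-finite inputs keep their value and produce an
// unspecified exponent, hence the isFinite() guard above.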
  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);

  return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);

  return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
                                    dyn_cast_if_present<CallBase>(FMFSource));
                          bool AllowNonDeterministic) {
  if (Call->isNoBuiltin())
  Type *Ty = F->getReturnType();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getDataLayout(), TLI, Call);
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
  if (Call->isNoBuiltin() || Call->isStrictFP())
  Function *F = Call->getCalledFunction();

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      case LibFunc_log10l:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
        if (OpC->getType()->isDoubleTy())
        if (OpC->getType()->isFloatTy())
        if (OpC->getType()->isDoubleTy())
        if (OpC->getType()->isFloatTy())
        return !Op.isInfinity();
      case LibFunc_tanf: {
        Type *Ty = OpC->getType();
        return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        if (OpC->getType()->isDoubleTy())
        if (OpC->getType()->isFloatTy())
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
      case LibFunc_powf: {
        return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
void TargetFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
amode Optimize addressing mode
mir Rename Register Operands
static bool InRange(int64_t Value, unsigned short Shift, int LBound, int HBound)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
bool isPosInfinity() const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
bool isNegInfinity() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static bool isDesirableCastOp(unsigned Opcode)
Whether creating a constant expression for this cast is desirable.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
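As a hedged sketch of the ConstantExpr factories above, round-tripping a pointer constant through ptrtoint/inttoptr (assumes pointer-sized integers fit in i64; the helper is hypothetical):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
using namespace llvm;

Constant *roundTripPointer(Constant *PtrC, LLVMContext &Ctx) {
  Type *I64 = Type::getInt64Ty(Ctx);
  Constant *AsInt = ConstantExpr::getPtrToInt(PtrC, I64);   // ptr -> i64
  return ConstantExpr::getIntToPtr(AsInt, PtrC->getType()); // i64 -> ptr
}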
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
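A hypothetical helper showing getAggregateElement and isNullValue together on a fixed-width vector constant:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

bool vectorIsAllZero(const Constant *C) {
  auto *VTy = dyn_cast<FixedVectorType>(C->getType());
  if (!VTy)
    return false;
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    const Constant *Elt = C->getAggregateElement(I);
    if (!Elt || !Elt->isNullValue()) // null means the index was out of range
      return false;
  }
  return true;
}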
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper for a function that represents a value that functionally represents the original function.
This class represents an Operation in the Expression.
A parsed version of the target data layout string, and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
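For instance, building the IR type <4 x i32> (the helper is hypothetical):

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

FixedVectorType *makeV4I32(LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), /*NumElts=*/4);
  // getNumElements() reads back the fixed element count: 4 here.
  return VecTy;
}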
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
bool hasNoUnsignedSignedWrap() const
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
PointerType * getType() const
Global values are always pointers.
const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the program.
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of the global (this can happen due to weak linkage) are guaranteed to have the same initializer.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
const Function * getFunction() const
Return the function this instruction belongs to.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain threshold (saturation point) past which the result can no longer change.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout structure.
unsigned getElementContainingOffset(uint64_t FixedOffset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
TypeSize getElementOffset(unsigned Idx) const
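A hedged sketch of the StructLayout queries above; DL and STy are assumed to be supplied by the caller, and the helper name is hypothetical:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

unsigned fieldContaining(const DataLayout &DL, StructType *STy, uint64_t Off) {
  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned Idx = SL->getElementContainingOffset(Off); // which field holds Off
  TypeSize Start = SL->getElementOffset(Idx);         // that field's own offset
  (void)Start;
  return Idx;
}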
Class to represent struct types.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
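A minimal sketch of the two TargetLibraryInfo calls above, checking one library function (the wrapper is hypothetical):

#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

bool sqrtIsAvailable(const TargetLibraryInfo &TLI) {
  LibFunc F;
  // Map the name to the LibFunc enum, then check availability on this target.
  return TLI.getLibFunc("sqrt", F) && TLI.has(F);
}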
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFP128Ty() const
Return true if this is 'fp128'.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has an IEEE compatible layout as defined by isIEEE().
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
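A small hypothetical helper tying several of the Type predicates above together:

#include "llvm/IR/Type.h"
using namespace llvm;

unsigned scalarBitWidth(Type *Ty) {
  // Vectors are sized via their element type, mirroring getScalarType().
  if (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy())
    return Ty->getScalarSizeInBits();
  return 0; // pointers and aggregates need a DataLayout to size
}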
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement enabled (AllowNonInbounds set to false).
LLVMContext & getContext() const
All values hold a context through their type.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
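A hypothetical example showing how the signed and unsigned APIntOps comparisons diverge on the same bit pattern:

#include "llvm/ADT/APInt.h"
using namespace llvm;

void minMaxSketch() {
  APInt A(8, 0x80), B(8, 0x7F); // signed: -128 vs 127; unsigned: 128 vs 127
  const APInt &SMin = APIntOps::smin(A, B); // A under a signed comparison
  const APInt &UMin = APIntOps::umin(A, B); // B under an unsigned comparison
  (void)SMin;
  (void)UMin;
}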
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ CE
Windows NT (Windows on ARM)
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
std::error_code status(const Twine &path, file_status &result, bool follow=true)
Get file status as if by POSIX stat().
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Constant * ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS, Constant *RHS, Type *Ty, Instruction *FMFSource)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - Try to cast the constant to the destination type, returning null if unsuccessful.
static double log2(double V)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands, respecting the denormal mode of the instruction's parent function.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
unsigned getPointerAddressSpace(const Type *T)
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldCompareInstruction(CmpInst::Predicate Predicate, Constant *C1, Constant *C2)
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
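A hedged sketch of calling IsConstantOffsetFromGlobal; it assumes C has pointer type so the offset can be sized from the index type (helper name is hypothetical):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

bool splitGlobalPlusOffset(Constant *C, const DataLayout &DL,
                           GlobalValue *&GV, APInt &Off) {
  // Off must be as wide as the pointer's index type.
  Off = APInt(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return IsConstantOffsetFromGlobal(C, GV, Off, DL); // C == GV + Off bytes
}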
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value, returning the original object being addressed.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating-point constant according to the denormal mode set in the instruction's parent function.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
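A minimal sketch of the typical ConstantFoldInstOperands call pattern: gather the instruction's operands as constants, then attempt the fold (the wrapper is hypothetical):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

Constant *tryFoldInstruction(Instruction *I, const DataLayout &DL,
                             const TargetLibraryInfo *TLI) {
  SmallVector<Constant *, 4> Ops;
  for (Value *Op : I->operands()) {
    auto *C = dyn_cast<Constant>(Op);
    if (!C)
      return nullptr; // every operand must already be a constant
    Ops.push_back(C);
  }
  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}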
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the specified operands and indices.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinable.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrower than the source type.
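For example, a hedged one-liner wrapping ConstantFoldIntegerCast for the signed case (the helper is hypothetical):

#include "llvm/Analysis/ConstantFolding.h"
using namespace llvm;

Constant *foldSignedIntCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // IsSigned plus the relative widths select between sext, zext and trunc.
  return ConstantFoldIntegerCast(C, DestTy, /*IsSigned=*/true, DL);
}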
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating-point environment.
static constexpr DenormalMode getIEEE()
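A hedged sketch of querying a function's denormal handling via getDenormalMode (listed earlier) and the DenormalMode fields above; the predicate name is hypothetical:

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/Function.h"
using namespace llvm;

bool flushesDenormalInputs(const Function &F) {
  // Query the denormal mode for 32-bit floats in this function.
  DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
  return Mode.Input == DenormalMode::PreserveSign ||
         Mode.Input == DenormalMode::PositiveZero;
}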
Incoming value for a lane-mask phi as a machine instruction; the incoming register Reg and incoming block Block are taken from the machine instruction.
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
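Finally, a hedged sketch combining computeKnownBits (listed earlier) with the KnownBits accessors above; it assumes V has integer type, and the helper name is hypothetical:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

bool valueFoldsToConstant(const Value *V, const DataLayout &DL, APInt &Out) {
  // Start with a KnownBits of the value's width, then refine it.
  KnownBits Known(DL.getTypeSizeInBits(V->getType()));
  computeKnownBits(V, Known, DL);
  if (!Known.isConstant())
    return false; // some bits are still unknown
  Out = Known.getConstant();
  return true;
}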