#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
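
// Excerpts from LLVM's constant-folding implementation (ConstantFolding.cpp).
// Elided code between fragments is marked with "// ...".
//
// foldConstVectorToAPInt packs the elements of a constant integer vector into
// a single APInt, walking the elements in endian order.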
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }
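
// FoldBitCast - constant fold 'bitcast', symbolically evaluating it with
// DataLayout. This always returns a non-null constant, but it may be a
// ConstantExpr if unfoldable.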
106 "Invalid constantexpr bitcast!");
112 if (
auto *VTy = dyn_cast<VectorType>(
C->getType())) {
115 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116 Type *SrcEltTy = VTy->getElementType();
129 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
130 SrcEltTy, NumSrcElts,
DL))
133 if (isa<IntegerType>(DestTy))
  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  // ...
  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector first, then bitcast that.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    // ...
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();
  // ...
  if (!isa<ConstantVector>(C) && !isa<ConstantDataVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  bool isLittleEndian = DL.isLittleEndian();
  SmallVector<Constant *, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    unsigned Ratio = NumSrcElt / NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        // ...
        // Shift the piece into place, depending on endianness.
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
        // ...
      }
      // ...
    }
    return ConstantVector::get(Result);
  }
  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt / NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);
    // ...
    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    // ...
    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // ...
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
      // ...
    }
  }
  return ConstantVector::get(Result);
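
// Example: bitcasting <2 x i64> to <4 x i32> uses Ratio = 2; each 64-bit
// source element is sliced into two 32-bit pieces, with ShiftAmt selecting
// the piece in endian order.

// IsConstantOffsetFromGlobal - if this constant is a constant offset from a
// global, return the global and the constant offset.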
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset = APInt(DL.getIndexTypeSizeInBits(GV->getType()), 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    Offset = APInt(DL.getIndexTypeSizeInBits(GV->getType()), 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;
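
// ConstantFoldLoadThroughBitcast - reinterpret a constant of type SrcTy as
// DestTy, provided the source is at least as large as the destination.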
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;

  TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
  TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
  if (!TypeSize::isKnownGE(SrcSize, DestSize))
    return nullptr;
  // ...
  // If the type sizes are the same and a cast is legal, just directly
  // cast the constant.
  if (SrcSize == DestSize &&
      DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
          DL.isNonIntegralPointerType(DestTy->getScalarType())) {
    Instruction::CastOps Cast = Instruction::BitCast;
    // If we are going from a pointer to int or vice versa, we spell the cast
    // differently.
    if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
      Cast = Instruction::IntToPtr;
    else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
      Cast = Instruction::PtrToInt;

    if (CastInst::castIsValid(Cast, C, DestTy))
      return ConstantExpr::getCast(Cast, C, DestTy);
  }
  // ...
  // Zero-length arrays are permitted inside structs but cannot be loaded
  // from; skip leading zero-size elements.
  Constant *ElemC;
  do {
    ElemC = C->getAggregateElement(Elem++);
  } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
  // ...
  // For non-byte-sized vector elements, the first element is not
  // necessarily located at the vector base address.
  if (auto *VT = dyn_cast<VectorType>(SrcTy))
    if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
      return nullptr;

  C = C->getAggregateElement(0u);
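
// ReadDataFromGlobal - recursive helper that copies the bytes of a constant
// into CurPtr, starting at ByteOffset and writing at most BytesLeft bytes.
// Returns true on success.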
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr
  // should be zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth() / 8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }
  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize =
          DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);
      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType()))
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
  }

  // Otherwise, unknown initializer type.
  return false;
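
// FoldReinterpretLoadFromConst - interpret a load of an integer-like type
// from offset Offset of constant C by reading the initializer's raw bytes.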
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // Try folding it as an integer load of the same width and bitcasting the
    // result back. This can be useful for union cases.
    // ...
    Type *MapTy = Type::getIntNTy(C->getContext(),
                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      // ...
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        return Constant::getNullValue(LoadTy);
      // ...
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  // ...

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;
  // ...
  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
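
// ReadByteArrayFromGlobal - read the remaining bytes of a global's
// initializer, starting at Offset, into a ConstantDataArray of i8.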
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(NBytes);
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;
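
// getConstantAtOffset - if this Offset points exactly to the start of an
// aggregate element, return that element; otherwise return nullptr.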
  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }
  if (Offset.getMinSignedBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;
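
// ConstantFoldLoadFromConstPtr - fold a load through a constant pointer by
// stripping offsets and consulting the underlying global's initializer.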
  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Constant *Result =
              ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL))
        return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C)))
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      // ...
    }
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // ...
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;

  // (From SymbolicallyEvaluateBinop:) fold binops whose result is already
  // determined by the known bits of the operands, and fold subtraction of
  // two pointers into the same global into a constant offset difference.
  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
    // ...
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into
  // a constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    // ...
    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
    // ...
  }
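
// CastGEPIndices - if array indices are not pointer-sized integers,
// explicitly cast them so that they aren't implicitly cast by the
// getelementptr.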
static Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                                Type *ResultTy,
                                std::optional<unsigned> InRangeIndex,
                                const DataLayout &DL,
                                const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();
  // ...
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      // ...
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      // ...
    }
  }
  // ...
  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}
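
// StripPtrCastKeepAS - strip the pointer casts, but preserve the address
// space information.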
static Constant *StripPtrCastKeepAS(Constant *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    Ptr = ConstantExpr::getPointerCast(
        Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
                                                 OldPtrTy->getAddressSpace()));
  }
  return Ptr;
}
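
// SymbolicallyEvaluateGEP - if we can symbolically evaluate the GEP constant
// expression, do so.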
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset(BitWidth,
               DL.getIndexedOffsetInType(
                   SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1,
                                       Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();
    // ...
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    // ...
    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    // ...
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value, cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    // ...
  }

  // Try to infer the element type from the pointee type.
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  else if (!PTy->isOpaque())
    SrcElemTy = PTy->getNonOpaquePointerElementType();
  // ...

  // Try to add additional zero indices to reach the desired result type.
  Type *ElemTy = SrcElemTy;
  // ...
  while (ElemTy != ResElemTy) {
    // ...
  }

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  std::optional<unsigned> InRangeIndex;
  if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(
      cast<PointerType>(C->getType())->isOpaqueOrPointeeTypeMatches(ElemTy) &&
      "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match the type
  // of what the original indices indexed, add a cast.
  if (C->getType() != ResTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
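
// ConstantFoldInstOperandsImpl - attempt to constant fold an instruction or
// constant expression with the specified opcode and constant operands.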
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  // ...
  switch (Opcode) {
  default:
    break;
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    // Handle floating point instructions separately to account for denormals.
    if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
      // ...
    }
  }
  // ...
  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    // ...
    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0],
                                             Ops[1], DL, TLI);
    return CE->getWithOperands(Ops);
  }

  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}
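
// ConstantFoldConstantImpl - fold the operands of a ConstantVector or
// ConstantExpr bottom-up, memoizing already-folded constants in FoldedOps.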
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res =
            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we
      // could skip the value if it is equal to the phi node itself we choose
      // not to because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }
    // ...
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }
  // ...
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}
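
// ConstantFoldCompareInstOperands - fold comparisons, looking through
// ptrtoint/inttoptr casts and GEP offsets where the result is unchanged.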
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    // fold: icmp (inttoptr x), null -> icmp x, 0
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
      // Convert the integer value to the right size to ensure we get the
      // proper extension or truncation.
      // ...
    }

    // Only do this transformation if the int is intptrty in size, otherwise
    // there is a truncation or extension that we aren't modeling.
    if (CE0->getOpcode() == Instruction::PtrToInt) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
      if (CE0->getType() == IntPtrTy) {
        // ...
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
          // ...
        }

        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds.
    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);
    Value *Stripped0 =
        Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
    APInt Offset1(IndexWidth, 0);
    Value *Stripped1 =
        Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
    if (Stripped0 == Stripped1)
      return ConstantExpr::getCompare(
          ICmpInst::getSignedPredicate(Predicate),
          ConstantInt::get(CE0->getContext(), Offset0),
          ConstantInt::get(CE0->getContext(), Offset1));
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;
  // ...

  // (From FlushFPConstant:) flush denormal operands/results to zero per the
  // containing function's denormal-fp-math attribute.
  if (!I || !I->getParent() || !I->getFunction())
    return Operand;

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  if (!CFP)
    return Operand;
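
// ConstantFoldCastOperand - fold casts with a constant operand, including
// ptrtoint/inttoptr round-trips through GEPs.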
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair. This requires
      // knowing the width of a pointer, so it can't be done in
      // ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantExpr::getIntegerCast(
            CE->getOperand(0), DL.getIntPtrType(CE->getType()),
            /*IsSigned=*/false);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy),
                  Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantExpr::getIntegerCast(FoldedValue, DestTy,
                                            /*IsSigned=*/false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, deal with it here; the rest of the case
    // can be handled below.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
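
// canConstantFoldCallTo - return true if it is even possible to fold a call
// to the specified function or intrinsic.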
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::amdgcn_perm:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;

  // Floating point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known to the
  // compiler.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();
  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  // Non-constrained variants of rounding operations mean the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic:
    break;
  }
  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal
  // to "cos", but has length 8.
  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled. The '12' here is the length of the
    // shortest name that can match; we need to check the size before looking
    // at Name[1] and Name[2] because memory for Name may not be null
    // terminated.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), V);
  llvm_unreachable("Can only constant fold half/float/double");
}

inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue(Result, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue(Result, Ty);
}
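
// constantFoldVectorReduce - fold llvm.vector.reduce.* over a constant
// integer vector by accumulating element by element.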
static Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
  FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
  if (!VT)
    return nullptr;

  // All integer reductions of a zero input produce zero.
  if (isa<ConstantAggregateZero>(Op))
    return ConstantInt::get(VT->getElementType(), 0);

  // This is the same as the underlying binops - poison propagates.
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
    return PoisonValue::get(VT->getElementType());

  // TODO: Handle undef.
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:  Acc = Acc + X; break;
    case Intrinsic::vector_reduce_mul:  Acc = Acc * X; break;
    case Intrinsic::vector_reduce_and:  Acc = Acc & X; break;
    case Intrinsic::vector_reduce_or:   Acc = Acc | X; break;
    case Intrinsic::vector_reduce_xor:  Acc = Acc ^ X; break;
    case Intrinsic::vector_reduce_smin: Acc = APIntOps::smin(Acc, X); break;
    case Intrinsic::vector_reduce_smax: Acc = APIntOps::smax(Acc, X); break;
    case Intrinsic::vector_reduce_umin: Acc = APIntOps::umin(Acc, X); break;
    case Intrinsic::vector_reduce_umax: Acc = APIntOps::umax(Acc, X); break;
    }
  }
  return ConstantInt::get(Op->getContext(), Acc);
}
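
// ConstantFoldSSEConvertToInt - fold the SSE/AVX512 scalar FP->int
// conversions, honoring round-toward-zero for the cvtt variants.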
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr; // Can't constant fold this.
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();
  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  // Otherwise, go through IEEEdouble.
  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}
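
// getConstIntOrUndef - extract an APInt from a ConstantInt operand, treating
// undef as "no value" (C is left null).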
static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

// (From mayFoldConstrained:) decide whether a constrained FP operation with
// the computed status may be folded.
// If the operation does not change exception status flags, it is safe to
// fold.
if (St == APFloat::opStatus::opOK)
  return true;

// If evaluation raised an FP exception, the result can depend on rounding
// mode. If the latter is unknown, folding is not possible.
if (ORM && *ORM == RoundingMode::Dynamic)
  return false;

// If FP exceptions are ignored, fold the call, even if such an exception is
// raised.
if (EB && *EB != fp::ExceptionBehavior::ebStrict)
  return true;

// (From getEvaluationRoundingMode:)
if (!ORM || *ORM == RoundingMode::Dynamic)
  // Even if the rounding mode is unknown, try evaluating the operation. If
  // it does not raise an inexact exception, rounding was not applied and the
  // result does not depend on the rounding mode.
  return RoundingMode::NearestTiesToEven;

// (From constantFoldCanonicalize:) denormals and NaNs may have special
// encodings, but it is OK to fold a totally average number.
if (Src.isNormal() || Src.isInfinity())
  return ConstantFP::get(CI->getContext(), Src);
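
// ConstantFoldScalarCall1 - fold intrinsics and libcalls with one scalar
// operand.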
  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }

  if (isa<PoisonValue>(Operands[0])) {
    // TODO: All of these operations should probably propagate poison.
    if (IntrinsicID == Intrinsic::canonicalize)
      return PoisonValue::get(Ty);
  }

  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet put in a basic block (e.g. when cloning
      // a function during inlining), Call's caller may not be available, so
      // check Call's BB first before querying Call->getCaller().
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace()))
        return Operands[0];
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;

      if (U.isNaN())
        return nullptr;

      unsigned Width = Ty->getIntegerBitWidth();
      APSInt Int(Width, !Signed);
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

      return nullptr;
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already has the desired saturation semantics.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);

    // ...

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // The v_fract instruction behaves like the OpenCL spec, which defines
      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator
      // is there to prevent fract(-small) from returning 1.0."
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }

    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.
    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }

    // We only fold functions with finite arguments. Folding NaN and inf is
    // likely to be aborted with an exception anyway, and some host libms
    // have known errors raising exceptions.
    if (!U.isFinite())
      return nullptr;

    // APFloat versions of these functions do not exist, so we use the host
    // native double versions.
    const APFloat &APF = Op->getValueAPF();

    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside the range
        // [-256, 256] differently. This is a rare case so bail out rather
        // than trying to handle the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-multiple inputs.
        const double SinVals[4] = {0.0, 1.0, 0.0, -1.0};
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      } else {
        // ...
      }
      return GetConstantFoldFPValue(V, Ty);
    }
    }
    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    if (!TLI->getLibFunc(Name, Func))
      return nullptr;

    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log2, APF, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (!APF.isNegative() && TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status != APFloat::opInexact && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
      return C;
    break;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    }
  }

  return nullptr;
  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  if (FCmp->isSignaling()) {
    if (Op1.isNaN() || Op2.isNaN())
      St = APFloat::opInvalidOp;
  } else if (Op1.isSignaling() || Op2.isSignaling()) {
    St = APFloat::opInvalidOp;
  }
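
// ConstantFoldScalarCall2 - fold intrinsics and libcalls with two scalar
// operands; undef operands are resolved per-intrinsic first.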
    bool IsOp0Undef = isa<UndefValue>(Operands[0]);
    bool IsOp1Undef = isa<UndefValue>(Operands[1]);
    switch (IntrinsicID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
      // If one argument is undef, return the other argument.
      if (IsOp0Undef)
        return Operands[1];
      if (IsOp1Undef)
        return Operands[0];
      break;
    }
  }

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
        APFloat Res = Op1V;
        APFloat::opStatus St;
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fsub:
          St = Res.subtract(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fmul:
          St = Res.multiply(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_frem:
          St = Res.mod(Op2V);
          break;
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        }
        if (mayFoldConstrained(
                const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
          return ConstantFP::get(Ty->getContext(), Res);
        return nullptr;
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::copysign:
        return ConstantFP::get(Ty->getContext(),
                               APFloat::copySign(Op1V, Op2V));
      case Intrinsic::minnum:
        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
      case Intrinsic::maxnum:
        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
      case Intrinsic::minimum:
        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
      case Intrinsic::maximum:
        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        // The legacy behaviour is that multiplying +/-0.0 by anything, even
        // NaN or infinity, gives +0.0.
        if (Op1V.isZero() || Op2V.isZero())
          return ConstantFP::getNullValue(Ty);
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      if (!TLI->getLibFunc(Name, Func))
        return nullptr;

      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
        // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
        // implementations (Solaris), so we do not assume a known result.
        if (Op1V.isZero() && Op2V.isZero())
          return nullptr;
        [[fallthrough]];
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::is_fpclass: {
        // ...
      }
      default:
        break;
      }

      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow(Op1V.convertToDouble(),
                                     (int)Op2C->getZExtValue())));

      if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
        // scalbn is equivalent to ldexp with float radix 2.
        APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
                                APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), Result);
      }
    }
    return nullptr;
  }

  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
      // This is the same as for binary ops - poison propagates.
      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
        return PoisonValue::get(Ty);

      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
      return ConstantInt::get(
          Ty, ICmpInst::compare(*C0, *C1,
                                MinMaxIntrinsic::getPredicate(IntrinsicID))
                  ? *C0
                  : *C1);

    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
      // X - undef -> { 0, false }
      // undef - X -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      [[fallthrough]];
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X + undef -> { -1, false }
      // undef + x -> { -1, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {Constant::getAllOnesValue(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      [[fallthrough]];
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
          ConstantInt::get(Ty->getContext(), Res),
          ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)};
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      // Poison propagates; undef saturates.
      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
        return PoisonValue::get(Ty);
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
        return PoisonValue::get(Ty);
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");
      // cttz(0, 1) and ctlz(0, 1) are poison.
      if (C1->isOne() && (!C0 || C0->isZero()))
        return PoisonValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      return ConstantInt::get(Ty, C0->countLeadingZeros());
    case Intrinsic::abs:
      assert(C1 && "Must be constant int");
      // ...
      break;
    }
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ false);
      break;
    }
  }
  } else if (abs(S1) >= abs(S0)) {
    // ...
  } else {
    // ...
  }

  switch (IntrinsicID) {
  default:
    llvm_unreachable("unhandled amdgcn cube intrinsic");
  case Intrinsic::amdgcn_cubeid:
    return APFloat(Sem, ID);
  case Intrinsic::amdgcn_cubema:
    return MA + MA;
  case Intrinsic::amdgcn_cubesc:
    return SC;
  case Intrinsic::amdgcn_cubetc:
    return TC;
  }
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  if (!C2)
    return UndefValue::get(Ty);

  APInt Val(32, 0);
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
    unsigned B = 0;

    if (Sel >= 13)
      B = 0xff;
    else if (Sel == 12)
      B = 0x00;
    else {
      const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
      if (!Src)
        ++NumUndefBytes;
      else if (Sel < 8)
        B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
      else
        B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    }

    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    return UndefValue::get(Ty);

  return ConstantInt::get(Ty, Val);
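
// ConstantFoldScalarCall3 - fold three-operand intrinsics (fma, fmuladd,
// fixed-point multiplies, funnel shifts, amdgcn_perm, ...).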
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
          APFloat Res = C1;
          APFloat::opStatus St;
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }

        switch (IntrinsicID) {
        default:
          break;
        case Intrinsic::amdgcn_fma_legacy: {
          // The legacy behaviour is that multiplying +/-0.0 by anything,
          // even NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            // It's tempting to just return C3 here, but that would give the
            // wrong result if C3 was -0.0.
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          [[fallthrough]];
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    // poison * C -> poison; C * poison -> poison
    if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
      return PoisonValue::get(Ty);

    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // undef * C -> 0; C * undef -> 0
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    // This code performs rounding towards negative infinity in case the
    // result cannot be represented exactly for the given scale. This is the
    // same approach as used by DAGTypeLegalizer::ExpandIntRes_MULFIX.
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to the oversized inverse shift.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
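
// ConstantFoldFixedVectorCall - fold vector intrinsics, either specially
// (masked_load, the MVE vctp family, get_active_lane_mask) or lane by lane
// via the scalar folder.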
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        // ...
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    // ...
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // ...
      Lane[J] = Operands[J]->getAggregateElement(I);
      // ...
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;
    return ConstantInt::getFalse(SVTy);
  }
  default:
    break;
  }
  return nullptr;
}
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  // ...
  StringRef Name = F->getName();
  Type *Ty = F->getReturnType();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, F->getIntrinsicID(), FVTy, Operands,
        F->getParent()->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, F->getIntrinsicID(), SVTy, Operands,
        F->getParent()->getDataLayout(), TLI, Call);

  // TODO: If this is a library function, we already discovered that above,
  // so we should pass the LibFunc, not the name.
  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
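
// isMathLibCallNoop - return true if evaluating the math libcall with the
// given constant arguments is known to have no side effects (no errno, no FP
// exceptions).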
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p, etc.
      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) are well-defined, some
        // libm implementations (Solaris) raise a domain exception, so be
        // conservative.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
void TargetFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
SmallVector< MachineOperand, 4 > Cond
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
amode Optimize addressing mode
static M68kRelType getType(unsigned Kind, MCSymbolRefExpr::VariantKind &Modifier, bool &IsPCRel)
mir Rename Register Operands
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
opStatus remainder(const APFloat &RHS)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
opStatus roundToIntegral(roundingMode RM)
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< unsigned > InRangeIndex=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getSelect(Constant *C, Constant *V1, Constant *V2, Type *OnlyIfReducedTy=nullptr)
Select constant expr.
static Constant * getIntegerCast(Constant *C, Type *Ty, bool IsSigned)
Create a ZExt, Bitcast or Trunc for integer -> integer casts.
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getLShr(Constant *C1, Constant *C2, bool isExact=false)
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getOr(Constant *C1, Constant *C2)
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getCompare(unsigned short pred, Constant *C1, Constant *C2, bool OnlyIfReduced=false)
Return an ICmp or FCmp comparison operator constant expression.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper for a function that represents a value that functionally represents the original function.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Type * getSourceElementType() const
std::optional< unsigned > getInRangeIndex() const
Returns the offset of the index with an inrange attachment, or std::nullopt if none.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
uint64_t getElementOffset(unsigned Idx) const
unsigned getElementContainingOffset(uint64_t Offset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Type * getNonOpaquePointerElementType() const
Only use this method in code that is not reachable with opaque pointers, or part of deprecated methods.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has an IEEE compatible layout as defined b...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
LLVMContext & getContext() const
All values hold a context through their type.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
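The same bit pattern can order differently under the signed and unsigned interpretations; a minimal sketch (function name hypothetical):

#include "llvm/ADT/APInt.h"
using namespace llvm;

void apintMinMaxSketch() {
  APInt A(8, 0x80); // -128 as signed, 128 as unsigned
  APInt B(8, 0x01);
  const APInt &SMin = APIntOps::smin(A, B); // A: 0x80 is INT8_MIN
  const APInt &UMax = APIntOps::umax(A, B); // A: 0x80 is the larger unsigned
  (void)SMin;
  (void)UMax;
}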
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ C
The default LLVM calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ CE
Windows NT (Windows on ARM)
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
std::error_code status(const Twine &path, file_status &result, bool follow=true)
Get file status as if by POSIX stat().
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - try to cast the constant to the destination type, returning null if unsuccessful.
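A minimal sketch (helper name hypothetical) reinterpreting an i32 constant as a float, which can fold because the two types have the same store size:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

Constant *bitcastFoldSketch(LLVMContext &Ctx, const DataLayout &DL) {
  // 0x3F800000 is the bit pattern of 1.0f, so a successful fold yields the
  // float constant 1.0; nullptr signals the fold was not possible.
  Constant *I32 = ConstantInt::get(Type::getInt32Ty(Ctx), 0x3F800000);
  return ConstantFoldLoadThroughBitcast(I32, Type::getFloatTy(Ctx), DL);
}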
static double log2(double V)
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I)
Attempt to constant fold a floating point binary operation with the specified operands,...
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value, returning the original object being addressed.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
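A minimal sketch (helper name hypothetical) folding "icmp slt i32 1, 2" down to the i1 constant true:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

Constant *compareFoldSketch(LLVMContext &Ctx, const DataLayout &DL) {
  Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
  Constant *Two = ConstantInt::get(Type::getInt32Ty(Ctx), 2);
  // The predicate is passed as the unsigned encoding of CmpInst::ICMP_SLT.
  return ConstantFoldCompareInstOperands(CmpInst::ICMP_SLT, One, Two, DL);
}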
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE maxNum semantics.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating-point constant according to the denormal mode set in the instruction's parent function.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
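A minimal sketch (helper name hypothetical) folding "add i32 2, 3" to the constant 5:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

Constant *addFoldSketch(LLVMContext &Ctx, const DataLayout &DL) {
  Constant *A = ConstantInt::get(Type::getInt32Ty(Ctx), 2);
  Constant *B = ConstantInt::get(Type::getInt32Ty(Ctx), 3);
  // Returns ConstantInt 5, or nullptr if the operation cannot be folded.
  return ConstantFoldBinaryOpOperands(Instruction::Add, A, B, DL);
}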
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE minNum semantics.
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the specified operands and indices.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinable.
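A minimal sketch (helper and global names hypothetical) of folding a load from a constant global at a byte offset:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
using namespace llvm;

Constant *loadFoldSketch(Module &M) {
  LLVMContext &Ctx = M.getContext();
  const DataLayout &DL = M.getDataLayout();
  // @g = internal constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
  uint32_t Vals[] = {1, 2, 3, 4};
  Constant *Init = ConstantDataArray::get(Ctx, Vals);
  auto *GV = new GlobalVariable(M, Init->getType(), /*isConstant=*/true,
                                GlobalValue::InternalLinkage, Init, "g");
  // A byte offset of 4 selects the second element, so the load folds to 2.
  APInt Offset(DL.getIndexTypeSizeInBits(GV->getType()), 4);
  return ConstantFoldLoadFromConstPtr(GV, Type::getInt32Ty(Ctx), Offset, DL);
}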
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 minimum semantics.
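The four variants differ mainly in NaN handling: minnum/maxnum treat a quiet NaN as missing data and return the other operand, while minimum/maximum propagate it. A minimal sketch (function name hypothetical):

#include "llvm/ADT/APFloat.h"
using namespace llvm;

void fpMinMaxSketch() {
  APFloat QNaN = APFloat::getNaN(APFloat::IEEEdouble());
  APFloat One(1.0);
  APFloat A = maxnum(QNaN, One);  // 1.0: the NaN operand is ignored
  APFloat B = maximum(QNaN, One); // NaN: the NaN operand is propagated
  (void)A;
  (void)B;
}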
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating-point environment.
static constexpr DenormalMode getIEEE()
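A minimal sketch (predicate name hypothetical) of querying whether a function's declared denormal mode flushes 'float' inputs:

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/Function.h"
using namespace llvm;

bool flushesDenormalInputs(const Function &F) {
  DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
  // Both flushing kinds replace a denormal input with a zero; they differ
  // only in the sign given to that zero.
  return Mode.Input == DenormalMode::PreserveSign ||
         Mode.Input == DenormalMode::PositiveZero;
}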
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
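Putting computeKnownBits together with these two accessors, a minimal sketch (helper name hypothetical, V assumed to have integer type):

#include <optional>
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

std::optional<APInt> tryGetConstantValue(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  // isConstant() holds only when no bit is left unknown.
  if (Known.isConstant())
    return Known.getConstant();
  return std::nullopt;
}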