#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
  // Shift each source element into place and OR it into the result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }
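  // Illustrative sketch (not part of this file): on a little-endian target,
  // folding `bitcast <4 x i8> <i8 1, i8 2, i8 3, i8 4> to i32` visits lanes
  // 3,2,1,0 and accumulates 0x04030201. A minimal stand-alone model of the
  // accumulation, assuming byte-sized lanes:
  //   uint32_t Result = 0;
  //   uint8_t Lane[4] = {1, 2, 3, 4};
  //   for (unsigned i = 0; i != 4; ++i) {
  //     Result <<= 8;
  //     Result |= Lane[LittleEndian ? 3 - i : i];
  //   }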
106 "Invalid constantexpr bitcast!");
112 if (
auto *VTy = dyn_cast<VectorType>(
C->getType())) {
115 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116 Type *SrcEltTy = VTy->getElementType();
129 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
130 SrcEltTy, NumSrcElts,
DL))
133 if (isa<IntegerType>(DestTy))
  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the rest of the code can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // If IR wasn't able to fold the source into an integer vector, bail out.
  if (!isa<ConstantVector>(C) &&
      !isa<ConstantDataVector>(C))
    return C;

  bool isLittleEndian = DL.isLittleEndian();
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    unsigned Ratio = NumSrcElt / NumDstElt;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each destination element from Ratio source elements.
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        // ... zero-extend Src, shift it into place, and OR it into the
        // element being built ...
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
      }
    }
  }
  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt / NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding it into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // ... extract the DstBitSize piece of the source value at ShiftAmt
      // into a new destination element ...
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
    }
  }
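  // Worked example (sketch, mirroring the comment in upstream LLVM):
  //   bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to <4 x i32> <i32 0, i32 0, i32 1, i32 0> on a little-endian
  // target and to <4 x i32> <i32 0, i32 0, i32 0, i32 1> on a big-endian
  // one, because the low 32 bits of the i64 value 1 land in different lanes.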
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
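  // Usage sketch (hypothetical caller, not from this file): recover the base
  // global and byte offset behind a folded constant address `Addr`.
  //   GlobalValue *GV;
  //   APInt Offset;
  //   if (IsConstantOffsetFromGlobal(Addr, GV, Offset, DL))
  //     errs() << GV->getName() << " + " << Offset << "\n";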
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // If the type sizes are the same and a cast is legal, directly cast the
    // constant, spelling int<->ptr casts appropriately.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth() / 8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBitsAsZExtValue(8, n * 8);
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);
      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // For non-byte-sized vector elements we cannot assume padding to the
      // next byte boundary between elements.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }
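  // Sketch of the byte-reading model above (assumed values): reading the i32
  // constant 0x11223344 at ByteOffset 0 into a 4-byte buffer stores
  // {0x44, 0x33, 0x22, 0x11} on a little-endian layout; on big-endian the
  // index is mirrored (n = IntBytes - n - 1), storing {0x11, 0x22, 0x33, 0x44}.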
  // Bail out early: we do not expect to load from a scalable global.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load, try folding it as an equally-sized integer
  // load and bitcasting the result, which is useful for union cases.
  if (!IntType) {
    Type *MapTy = Type::getIntNTy(
        C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Do not replace a load of a non-integral pointer with an inttoptr.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is poison.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
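  // Worked example (sketch, assumed values): given a constant global
  // [4 x i8] c"\01\02\03\04", loading an i16 at byte offset 1 reads the raw
  // bytes {0x02, 0x03}; on a little-endian layout the loop above assembles
  // ResultVal = (0x03 << 8) | 0x02 = 0x0302.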
  // Bail if the requested load is too large to materialize as bytes.
  if (NBytes > UINT16_MAX)
    return nullptr;

  SmallVector<unsigned char, 4> RawBytes(size_t(NBytes));
  unsigned char *CurPtr = RawBytes.data();
  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;
  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  // Only handle the case where the offset points exactly at an element start.
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }
  // ConstantFoldLoadFromConst: try a byte-level reinterpret fold first.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  // ConstantFoldLoadFromConstPtr: only loads from constant globals with a
  // definitive initializer can be folded.
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  // ConstantFoldLoadFromUniformValue: loads from uniform constants are
  // offset-independent.
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // A load of an all-ones value can be folded for int/FP result types.
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;

  // SymbolicallyEvaluateBinop: fold 'and' via known bits.
  if (Opc == Instruction::And) {
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into
  // a constant; this happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
        // (&GV+C1) - (&GV+C2) -> C1-C2; pointer arithmetic cannot overflow.
        return ConstantInt::get(Op0->getType(),
                                (Offs1 - Offs2).sextOrTrunc(OpSize));
      }
  }
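  // Worked example (sketch, assumed known bits): if Op0 is known to have its
  // high 24 bits zero (Known0.Zero = 0xFFFFFF00) and Op1 is the constant
  // mask 0xFF (Known1.One = 0x000000FF), then Known1.One | Known0.Zero is
  // all-ones and `and Op0, Op1` folds to Op0: every bit the mask could
  // clear is already zero in Op0.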
/// Cast array indices to pointer-sized integers.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, bool InBounds,
                         std::optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant *, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      // ... cast the index to NewType and record it in NewIdxs ...
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, InBounds, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}
/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    Ptr = ConstantExpr::getPointerCast(
        Ptr, PointerType::getWithSamePointeeType(
                 NewPtrTy, OldPtrTy->getAddressSpace()));
  }
  return Ptr;
}
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->isInBounds(), GEP->getInRangeIndex(),
                                   DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not constant.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indexes so that the
  // result matches the original pointee type.
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  else if (!PTy->isOpaque())
    SrcElemTy = PTy->getNonOpaquePointerElementType();

  Type *ElemTy = SrcElemTy;
  SmallVector<Constant *, 32> NewIdxs;
  while (ElemTy != ResElemTy) {
    // ... walk the indexed types, building NewIdxs from Offset ...
  }

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  std::optional<unsigned> InRangeIndex;
  if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(
      cast<PointerType>(C->getType())->isOpaqueOrPointeeTypeMatches(ElemTy) &&
      "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match the type
  // of what the original indices indexed, add a cast.
  if (C->getType() != ResTy)
    C = FoldBitCast(C, ResTy, DL);
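  // Worked example (sketch, assumed IR): for @a = global [5 x i32], the GEP
  //   getelementptr [5 x i32], ptr @a, i64 0, i64 5
  // accumulates byte offset 0*20 + 5*4 = 20 from @a; nested GEPs fold their
  // offsets into the same running Offset before a single new getelementptr
  // (or an inttoptr of Base + Offset) is built.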
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for
      // denormals.
      if (const auto *I = dyn_cast<Instruction>(InstOrCE))
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0],
                                             Ops[1], DL, TLI);
    return CE->getWithOperands(Ops);
  }

  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}
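// Usage sketch (hypothetical caller, not from this file): fold an instruction
// whose operands have already been resolved to constants via the public
// ConstantFoldInstOperands entry point.
//   SmallVector<Constant *, 4> Ops; // one Constant per operand of I
//   if (Constant *Folded = ConstantFoldInstOperands(I, Ops, DL, TLI))
//     I->replaceAllUsesWith(Folded);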
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res =
            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
  // Handle PHI nodes quickly here.
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // Skip undef incoming values; the fold survives as long as the
      // remaining incoming constants agree.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  // (ConstantFoldConstant delegates to the shared implementation:)
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);

  // (ConstantFoldInstOperands likewise:)
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
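  // Example (sketch): a phi such as
  //   %p = phi i32 [ 7, %a ], [ 7, %b ], [ undef, %c ]
  // folds to i32 7 under the loop above: undef incoming values are skipped
  // and all remaining incoming constants agree.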
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    // Convert the integer value to the right size to ensure we get the proper
    // extension or truncation.
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
      Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false);
      Constant *Null = Constant::getNullValue(C->getType());
      return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
    }

    // Only do this transformation if the int is intptrty in size, otherwise
    // there is a truncation or extension that we aren't modeling.
    if (CE0->getOpcode() == Instruction::PtrToInt) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
      if (CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2.
    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);
    Value *Stripped0 =
        Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
    APInt Offset1(IndexWidth, 0);
    Value *Stripped1 =
        Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
    if (Stripped0 == Stripped1)
      return ConstantInt::getBool(
          Ops0->getContext(),
          ICmpInst::compare(Offset0, Offset1,
                            ICmpInst::getSignedPredicate(Predicate)));
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }
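  // Worked example (sketch): `icmp eq (or i32 %x, %y), 0` is decomposed by
  // the block above into `and (icmp eq %x, 0), (icmp eq %y, 0)`, since an OR
  // is zero exactly when both operands are zero; ICMP_NE dually becomes an
  // `or` of the two sub-compares.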
  // ConstantFoldBinaryOpOperands: try the symbolic folds first.
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  // FlushFPConstant: denormal flushing needs the enclosing function's
  // denormal mode, so bail if the instruction is not inside a function.
  if (!I || !I->getParent() || !I->getFunction())
    return Operand;

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair. This requires
      // knowing the width of a pointer, so it can't be done in
      // ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantExpr::getIntegerCast(
            CE->getOperand(0), DL.getIntPtrType(CE->getType()),
            /*IsSigned=*/false);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy),
                  Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantExpr::getIntegerCast(FoldedValue, DestTy,
                                            /*IsSigned=*/false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, deal with it here; the rest of the case can
    // be handled by getCast. This requires knowing the pointer size.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
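  // Worked example (sketch): `ptrtoint (getelementptr i8, ptr null, i64 42)
  // to i64` folds to `i64 42` via the null-base branch above: stripping the
  // GEP accumulates BaseOffset = 42 over a null base, and the final
  // zext/trunc is a no-op because the widths already match.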
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::amdgcn_perm:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;

  // Floating point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on the rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();
  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  // Non-constrained variants of rounding operations mean the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic:
    break;
  }
  if (!F->hasName() || Call->isStrictFP())
    return false;

  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for the function names that get used when the headers are
    // preprocessed with __FINITE_MATH_ONLY__ enabled. The length check
    // guards the Name[1]/Name[2] accesses below.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
  APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test whether a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}
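// Usage sketch (the call-site pattern used later in this file): fold a libm
// call on the host, rejecting results that raised errno or an FP exception.
//   if (Constant *Folded = ConstantFoldFP(sin, Op->getValueAPF(), Ty))
//     return Folded; // nullptr means the host evaluation trapped or errored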
  // All integer reductions of a zero input produce zero.
  if (isa<ConstantAggregateZero>(Op))
    return ConstantInt::get(VT->getElementType(), 0);

  // This is the same as the underlying binops: poison propagates.
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
    return PoisonValue::get(VT->getElementType());

  // TODO: Handle undef.
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:  Acc = Acc + X; break;
    case Intrinsic::vector_reduce_mul:  Acc = Acc * X; break;
    case Intrinsic::vector_reduce_and:  Acc = Acc & X; break;
    case Intrinsic::vector_reduce_or:   Acc = Acc | X; break;
    case Intrinsic::vector_reduce_xor:  Acc = Acc ^ X; break;
    case Intrinsic::vector_reduce_smin: Acc = APIntOps::smin(Acc, X); break;
    case Intrinsic::vector_reduce_smax: Acc = APIntOps::smax(Acc, X); break;
    case Intrinsic::vector_reduce_umin: Acc = APIntOps::umin(Acc, X); break;
    case Intrinsic::vector_reduce_umax: Acc = APIntOps::umax(Acc, X); break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
/// Attempt to fold an SSE floating-point-to-integer conversion of a constant.
/// If roundTowardZero is false, the default IEEE rounding is used (toward
/// nearest, ties to even).
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
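  // Worked example (sketch): cvttss2si on 3.7f uses rmTowardZero and yields
  // i32 3 (opInexact is tolerated when truncating). The non-truncating
  // cvtss2si rounds to nearest-even instead, so 3.5f converts to 4 while
  // 2.5f converts to 2.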
  Type *Ty = Op->getType();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}
  // mayFoldConstrained: folding is safe if exception flags are unchanged.
  if (St == APFloat::opStatus::opOK)
    return true;
  if (ORM && *ORM == RoundingMode::Dynamic)
    return false; // dynamic rounding: result unpredictable
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
    return true;  // exceptions ignored: fold despite the raised exception
  // getEvaluationRoundingMode: unknown/dynamic modes evaluate as default.
  if (!ORM || *ORM == RoundingMode::Dynamic)
    return RoundingMode::NearestTiesToEven;
  // constantFoldCanonicalize: normal/infinite values are already canonical.
  if (Src.isNormal() || Src.isInfinity())
  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only return true
    // for manifest constants, not those that depend on constants with
    // unknowable values, e.g. GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }

  if (isa<PoisonValue>(Operands[0])) {
    if (IntrinsicID == Intrinsic::canonicalize)
      return PoisonValue::get(Ty);
  }

  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1; ctpop() is between 0 and bitwidth;
    // the saturating conversions can pick 0 for undef.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0.
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet placed in a basic block (e.g. when
      // cloning a function during inlining), Call's caller may be missing,
      // so check Call's parent block first.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace()))
        return Operands[0];
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;

      if (U.isNaN())
        return nullptr;

      unsigned Width = Ty->getIntegerBitWidth();
      APSInt Int(Width, !Signed);
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

      return nullptr;
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already has the desired saturation semantics.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);

    // Rounding operations do not raise FP exceptions, unless the argument is
    // a signaling NaN.
    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }
    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // v_fract behaves like the OpenCL definition
      // fract(x) = fmin(x - floor(x), 0x1.fffffep-1f): the min() prevents
      // fract(-small) from returning 1.0.
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }

    // Rounding operations can be folded for constrained intrinsics if the
    // rounding mode is known.
    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }

    // APFloat versions of these functions do not exist, so use the host's
    // native double versions.
    const APFloat &APF = Op->getValueAPF();
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside
        // [-256, 256] differently; bail out rather than model the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = {0.0, 1.0, 0.0, -1.0};
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      }
      return GetConstantFoldFPValue(V, Ty);
    }
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log2, APF, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (!APF.isNegative() && TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());
      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
      // Conversion is always precise.
      (void)status;
      assert(status != APFloat::opInexact && !lost &&
             "Precision lost during fp16 constfolding");
      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
      return C;
    break;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    }
  }
  // evaluateCompare: signaling compares raise invalid on any NaN, quiet
  // compares only on signaling NaNs.
  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  if (FCmp->isSignaling()) {
    if (Op1.isNaN() || Op2.isNaN())
      St = APFloat::opInvalidOp;
  } else if (Op1.isSignaling() || Op2.isSignaling()) {
    St = APFloat::opInvalidOp;
  }

  // ConstantFoldScalarCall2: if one argument of a min/max intrinsic is
  // undef, the result is the other argument.
  bool IsOp0Undef = isa<UndefValue>(Operands[0]);
  bool IsOp1Undef = isa<UndefValue>(Operands[1]);
  switch (IntrinsicID) {
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
    if (IsOp0Undef)
      return Operands[1];
    if (IsOp1Undef)
      return Operands[0];
    break;
  }
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
        APFloat Res = Op1V;
        APFloat::opStatus St;
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fsub:
          St = Res.subtract(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fmul:
          St = Res.multiply(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_frem:
          St = Res.mod(Op2V);
          break;
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        }
        if (mayFoldConstrained(
                const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
          return ConstantFP::get(Ty->getContext(), Res);
        return nullptr;
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::copysign:
        return ConstantFP::get(Ty->getContext(),
                               APFloat::copySign(Op1V, Op2V));
      case Intrinsic::minnum:
        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
      case Intrinsic::maxnum:
        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
      case Intrinsic::minimum:
        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
      case Intrinsic::maximum:
        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        // The legacy behaviour is that multiplying +/-0.0 by anything, even
        // NaN or infinity, gives +0.0.
        if (Op1V.isZero() || Op2V.isZero())
          return ConstantFP::getZero(Ty);
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }

      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::is_fpclass: {
        // ... classify Op1V against the Op2C test mask ...
        break;
      }
      default:
        break;
      }

      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow(Op1V.convertToDouble(),
                                     (int)Op2C->getZExtValue())));

      if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
        // scalbn is equivalent to ldexp with float radix 2.
        APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
                                APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), Result);
      }
    }
  const APInt *C0, *C1;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1))
    return nullptr;

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
    if (!C0 && !C1)
      return UndefValue::get(Ty);
    if (!C0 || !C1)
      return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
    return ConstantInt::get(
        Ty, ICmpInst::compare(*C0, *C1,
                              MinMaxIntrinsic::getPredicate(IntrinsicID))
                ? *C0
                : *C1);

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // X - undef -> { 0, false }; undef - X -> { 0, false }
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);
    [[fallthrough]];
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
    // X + undef -> { -1, false }; undef + X -> { -1, false }
    if (!C0 || !C1) {
      return ConstantStruct::get(
          cast<StructType>(Ty),
          {Constant::getAllOnesValue(Ty->getStructElementType(0)),
           Constant::getNullValue(Ty->getStructElementType(1))});
    }
    [[fallthrough]];
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    // undef * X -> { 0, false }; X * undef -> { 0, false }
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    APInt Res;
    bool Overflow;
    switch (IntrinsicID) {
    default:
      llvm_unreachable("Invalid case");
    case Intrinsic::sadd_with_overflow:
      Res = C0->sadd_ov(*C1, Overflow);
      break;
    case Intrinsic::uadd_with_overflow:
      Res = C0->uadd_ov(*C1, Overflow);
      break;
    case Intrinsic::ssub_with_overflow:
      Res = C0->ssub_ov(*C1, Overflow);
      break;
    case Intrinsic::usub_with_overflow:
      Res = C0->usub_ov(*C1, Overflow);
      break;
    case Intrinsic::smul_with_overflow:
      Res = C0->smul_ov(*C1, Overflow);
      break;
    case Intrinsic::umul_with_overflow:
      Res = C0->umul_ov(*C1, Overflow);
      break;
    }
    Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)};
    return ConstantStruct::get(cast<StructType>(Ty), Ops);
  }
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    if (!C0 && !C1)
      return UndefValue::get(Ty);
    if (!C0 || !C1)
      return Constant::getAllOnesValue(Ty);
    if (IntrinsicID == Intrinsic::uadd_sat)
      return ConstantInt::get(Ty, C0->uadd_sat(*C1));
    return ConstantInt::get(Ty, C0->sadd_sat(*C1));
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    if (!C0 && !C1)
      return UndefValue::get(Ty);
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::usub_sat)
      return ConstantInt::get(Ty, C0->usub_sat(*C1));
    return ConstantInt::get(Ty, C0->ssub_sat(*C1));
  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    assert(C1 && "Must be constant int");

    // cttz(0, 1) and ctlz(0, 1) are poison.
    if (C1->isOne() && (!C0 || C0->isZero()))
      return PoisonValue::get(Ty);
    if (!C0)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::cttz)
      return ConstantInt::get(Ty, C0->countr_zero());
    return ConstantInt::get(Ty, C0->countl_zero());

  case Intrinsic::abs:
    assert(C1 && "Must be constant int");
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for the default rounding-mode operand.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ false);
      break;
    }
  }

  // In ConstantFoldAMDGCNCubeIntrinsic, the major axis is selected by
  // comparing coordinate magnitudes, e.g.:
  //   } else if (abs(S1) >= abs(S0)) {
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::amdgcn_cubeid:
    // ... return the face index for the major axis ...
  case Intrinsic::amdgcn_cubema:
    // ... return the major-axis coordinate ...
  case Intrinsic::amdgcn_cubesc:
    // ... return the S cube coordinate ...
  case Intrinsic::amdgcn_cubetc:
    // ... return the T cube coordinate ...
    break;
  }

// ConstantFoldAMDGCNPermIntrinsic:
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  APInt Val(32, 0);
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
    unsigned B = 0;

    if (Sel >= 13)
      B = 0xff;
    else if (Sel == 12)
      B = 0x00;
    else {
      const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
      if (!Src)
        ++NumUndefBytes;
      else if (Sel < 8)
        B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
      else
        B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    }

    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    return UndefValue::get(Ty);
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
          APFloat Res = C1;
          APFloat::opStatus St;
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }

        switch (IntrinsicID) {
        default:
          break;
        case Intrinsic::amdgcn_fma_legacy: {
          // The legacy behaviour is that multiplying +/-0.0 by anything, even
          // NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            // It is tempting to just return C3, but that would give the
            // wrong result if C3 was -0.0.
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          [[fallthrough]];
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // undef * C -> 0; C * undef -> 0
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    // This code rounds towards negative infinity when the result cannot be
    // represented exactly for the given scale, matching
    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to an oversized inverse shift.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID, Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
  return nullptr;
}
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];
    // ... fold the load and merge lanes under the mask:
    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      if (isa<UndefValue>(MaskElt)) {
        // ... either lane value is acceptable for an undef mask bit ...
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        // ... take the loaded element ...
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++)
        NCs.push_back(i < Limit ? ConstantInt::getTrue(Ty)
                                : ConstantInt::getFalse(Ty));
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++)
        NCs.push_back(Base + i < Limit ? ConstantInt::getTrue(Ty)
                                       : ConstantInt::getFalse(Ty));
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;
      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
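// Worked example (sketch): `llvm.get.active.lane.mask.v4i1.i32(i32 6, i32 8)`
// folds lane-by-lane with Base = 6 and Limit = 8, producing
// <4 x i1> <true, true, false, false> (6 < 8, 7 < 8, 8 !< 8, 9 !< 8).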
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;

    return ConstantInt::getFalse(SVTy);
  }
  default:
    break;
  }
  return nullptr;
}

// ConstantFoldCall dispatches on the return type:
  if (Call->isNoBuiltin())
    return nullptr;

  Type *Ty = F->getReturnType();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, IID, FVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, IID, SVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinhl:
      case LibFunc_sinh:
      case LibFunc_sinhf:
      case LibFunc_coshl:
      case LibFunc_cosh:
      case LibFunc_coshf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) is well-defined, we
        // do not rely on a particular libm behavior for that input.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
SmallVector< MachineOperand, 4 > Cond
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
amode Optimize addressing mode
mir Rename Register Operands
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
bool isPosInfinity() const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
bool isNegInfinity() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< unsigned > InRangeIndex=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getIntegerCast(Constant *C, Type *Ty, bool IsSigned)
Create a ZExt, Bitcast or Trunc for integer -> integer casts.
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getLShr(Constant *C1, Constant *C2, bool isExact=false)
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getOr(Constant *C1, Constant *C2)
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getCompare(unsigned short pred, Constant *C1, Constant *C2, bool OnlyIfReduced=false)
Return an ICmp or FCmp comparison operator constant expression.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
static Constant * getZero(Type *Ty, bool Negative=false)
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper for a function that represents a value that functionally represents the original function.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Type * getSourceElementType() const
std::optional< unsigned > getInRangeIndex() const
Returns the offset of the index with an inrange attachment, or std::nullopt if none.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout structure.
unsigned getElementContainingOffset(uint64_t FixedOffset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
TypeSize getElementOffset(unsigned Idx) const
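A sketch (helper name hypothetical) of mapping a byte offset back to a struct field:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// StructLayout is computed lazily and cached by the DataLayout.
unsigned fieldAtOffset(const DataLayout &DL, StructType *STy, uint64_t Offset) {
  const StructLayout *SL = DL.getStructLayout(STy);
  return SL->getElementContainingOffset(Offset);
}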
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
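A sketch (helper name hypothetical) of recognizing a call target as an available library routine:

#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;

// Maps a name to a LibFunc enumerator, then checks target availability.
bool isAvailableSqrt(const TargetLibraryInfo &TLI, StringRef Name) {
  LibFunc F;
  return TLI.getLibFunc(Name, F) && TLI.has(F) && F == LibFunc_sqrt;
}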
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Type * getNonOpaquePointerElementType() const
Only use this method in code that is not reachable with opaque pointers, or as part of deprecated methods.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has an IEEE-compatible layout as defined by isIEEE().
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
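A minimal sketch (predicate name hypothetical) of the typical pattern: combine these predicates with getScalarType so vectors and scalars share one path:

#include "llvm/IR/Type.h"
using namespace llvm;

// Accept float, double, and vectors thereof.
bool isSingleOrDoubleLike(Type *Ty) {
  Type *Scalar = Ty->getScalarType(); // element type for vectors, Ty otherwise
  return Ty->isFPOrFPVectorTy() &&
         (Scalar->isFloatTy() || Scalar->isDoubleTy());
}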
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
LLVMContext & getContext() const
All values hold a context through their type.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
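A small sketch showing that signedness lives in the operation, not in the APInt bits:

#include <cassert>
#include "llvm/ADT/APInt.h"
using namespace llvm;

void minMaxDemo() {
  APInt A(8, (uint64_t)-1, /*isSigned=*/true); // 0xFF: -1 signed, 255 unsigned
  APInt B(8, 1);
  assert(APIntOps::smax(A, B) == B); //   1 > -1 when compared as signed
  assert(APIntOps::umax(A, B) == A); // 255 >  1 when compared as unsigned
}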
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ CE
Windows NT (Windows on ARM)
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
std::error_code status(const Twine &path, file_status &result, bool follow=true)
Get file status as if by POSIX stat().
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - try to cast constant to destination type, returning null if unsuccessful.
static double log2(double V)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it is even possible to fold a call to the specified function.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I)
Attempt to constant fold a floating point binary operation with the specified operands,...
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value, returning the original object being addressed.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE maxNum semantics.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating-point constant according to the denormal mode set in the instruction's parent function.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
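A minimal sketch (helper name hypothetical) of folding an integer add without materializing an instruction:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Fold `add i32 40, 2`; returns the folded constant or null on failure.
Constant *foldAdd(LLVMContext &Ctx, const DataLayout &DL) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *LHS = ConstantInt::get(I32, 40);
  Constant *RHS = ConstantInt::get(I32, 2);
  return ConstantFoldBinaryOpOperands(Instruction::Add, LHS, RHS, DL);
}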
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE minNum semantics.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
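A sketch (wrapper name hypothetical) of the call; the KnownBits object must be sized to the value's scalar bit width:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

KnownBits queryKnownBits(const Value *V, const DataLayout &DL) {
  unsigned BitWidth =
      DL.getTypeSizeInBits(V->getType()->getScalarType()).getFixedValue();
  KnownBits Known(BitWidth);
  computeKnownBits(V, Known, DL); // optional context arguments left defaulted
  return Known;
}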
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the specified operands and indices.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
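A sketch (helper name and offset are illustrative) of reading a typed value out of a constant global:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
using namespace llvm;

// Read an i32 at byte offset 4 from GV, or get null if not determinable.
Constant *loadI32AtOffset4(GlobalVariable *GV, const DataLayout &DL) {
  Type *I32 = Type::getInt32Ty(GV->getContext());
  APInt Offset(DL.getIndexTypeSizeInBits(GV->getType()), 4);
  return ConstantFoldLoadFromConstPtr(GV, I32, Offset, DL);
}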
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 minimum semantics.
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
static constexpr DenormalMode getDynamic()
static constexpr DenormalMode getIEEE()
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
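A closing sketch (helper name hypothetical) tying the two accessors together:

#include <optional>
#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// If the analysis pinned down every bit, the value is a known constant.
std::optional<APInt> asConstant(const KnownBits &Known) {
  if (Known.isConstant())
    return Known.getConstant();
  return std::nullopt;
}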