#include "llvm/Config/config.h"
// ...
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
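// NOTE: this excerpt of llvm/lib/Analysis/ConstantFolding.cpp is fragmentary;
// "// ..." marks elided source lines, and the orientation comments between
// fragments were added editorially.
//
// Fragment of foldConstVectorToAPInt: each source vector element is read in
// endian order and OR'd into the accumulated APInt result.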
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (isa_and_nonnull<UndefValue>(Element)) {
      // ...
    }
    // ...
    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    // ...
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }
108 "Invalid constantexpr bitcast!");
114 if (
auto *VTy = dyn_cast<VectorType>(
C->getType())) {
117 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
118 Type *SrcEltTy = VTy->getElementType();
131 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
132 SrcEltTy, NumSrcElts,
DL))
135 if (isa<IntegerType>(DestTy))
136 return ConstantInt::get(DestTy, Result);
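// Fragment of FoldBitCast handling vector <-> vector bitcasts where the
// element counts differ: source elements are merged or split with shifts,
// walking in endian order.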
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  // ...
  if (!isa<VectorType>(C->getType()) &&
      (isa<ConstantFP>(C) || isa<ConstantInt>(C))) {
    // ...
  }
  // ...
  if (!isa<FixedVectorType>(C->getType()))
    // ...

  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C) &&
      !isa<ConstantInt>(C) && !isa<ConstantFP>(C))
    // ...

  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    // ...

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();
  // ...
  assert((isa<ConstantVector>(C) ||
          isa<ConstantDataVector>(C) || isa<ConstantInt>(C)) &&
         "Constant folding cannot fail for plain fp->int bitcast!");
  bool isLittleEndian = DL.isLittleEndian();
  // ...
  if (NumDstElt < NumSrcElt) {
    // ...
    unsigned Ratio = NumSrcElt / NumDstElt;
    // ...
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // ...
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (isa_and_nonnull<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        // ...
        Src = dyn_cast_or_null<ConstantInt>(Src);
        // ...
        assert(Src && "Constant folding cannot fail on plain integers");
        // ...
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
        // ...
      }
      assert(Elt && "Constant folding cannot fail on plain integers");
      // ...
    }
  unsigned Ratio = NumDstElt / NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
  // ...
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);
    // ...
    if (isa<UndefValue>(Element)) {
      // ...
    }
    // ...
    auto *Src = dyn_cast<ConstantInt>(Element);
    // ...
    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // ...
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
      // ...
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
    }
  }
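// Fragment of IsConstantOffsetFromGlobal: strips pointer casts,
// dso_local_equivalent wrappers, and constant GEPs to recover the underlying
// GlobalValue plus an accumulated byte offset.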
  if ((GV = dyn_cast<GlobalValue>(C))) {
    // ...
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    // ...
    *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    // ...
  }

  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return false;

  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    // ...

  auto *GEP = dyn_cast<GEPOperator>(CE);
  // ...
  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  // ...
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    // ...
  Type *SrcTy = C->getType();
  // ...
  TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
  TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
  if (!TypeSize::isKnownGE(SrcSize, DestSize))
    // ...

  if (SrcSize == DestSize &&
      // ...
      Cast = Instruction::IntToPtr;
    // ...
      Cast = Instruction::PtrToInt;
  // ...
      ElemC = C->getAggregateElement(Elem++);
    } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
  // ...
    if (auto *VT = dyn_cast<VectorType>(SrcTy))
      if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
        // ...
    C = C->getAggregateElement(0u);
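// Fragment of ReadDataFromGlobal: serializes a constant initializer into a
// raw byte buffer, honoring target endianness, so a load at a byte offset can
// be re-materialized as an integer.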
408 if (
auto *VT = dyn_cast<VectorType>(SrcTy))
409 if (!
DL.typeSizeEqualsStoreSize(VT->getElementType()))
412 C =
C->getAggregateElement(0u);
427 assert(ByteOffset <=
DL.getTypeAllocSize(
C->getType()) &&
428 "Out of range access");
432 if (isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C))
435 if (
auto *CI = dyn_cast<ConstantInt>(
C)) {
436 if ((CI->getBitWidth() & 7) != 0)
438 const APInt &Val = CI->getValue();
439 unsigned IntBytes =
unsigned(CI->getBitWidth()/8);
441 for (
unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
442 unsigned n = ByteOffset;
443 if (!
DL.isLittleEndian())
444 n = IntBytes - n - 1;
451 if (
auto *CFP = dyn_cast<ConstantFP>(
C)) {
452 if (CFP->getType()->isDoubleTy()) {
454 return ReadDataFromGlobal(
C, ByteOffset, CurPtr, BytesLeft,
DL);
456 if (CFP->getType()->isFloatTy()){
458 return ReadDataFromGlobal(
C, ByteOffset, CurPtr, BytesLeft,
DL);
460 if (CFP->getType()->isHalfTy()){
462 return ReadDataFromGlobal(
C, ByteOffset, CurPtr, BytesLeft,
DL);
467 if (
auto *CS = dyn_cast<ConstantStruct>(
C)) {
471 ByteOffset -= CurEltOffset;
476 uint64_t EltSize =
DL.getTypeAllocSize(CS->getOperand(Index)->getType());
478 if (ByteOffset < EltSize &&
479 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
486 if (Index == CS->getType()->getNumElements())
492 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
496 CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
497 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
499 CurEltOffset = NextEltOffset;
504 if (isa<ConstantArray>(
C) || isa<ConstantVector>(
C) ||
505 isa<ConstantDataSequential>(
C)) {
508 if (
auto *AT = dyn_cast<ArrayType>(
C->getType())) {
509 NumElts = AT->getNumElements();
510 EltTy = AT->getElementType();
511 EltSize =
DL.getTypeAllocSize(EltTy);
513 NumElts = cast<FixedVectorType>(
C->getType())->getNumElements();
514 EltTy = cast<FixedVectorType>(
C->getType())->getElementType();
517 if (!
DL.typeSizeEqualsStoreSize(EltTy))
520 EltSize =
DL.getTypeStoreSize(EltTy);
526 if (!ReadDataFromGlobal(
C->getAggregateElement(Index),
Offset, CurPtr,
531 assert(BytesWritten <= EltSize &&
"Not indexing into this element?");
532 if (BytesWritten >= BytesLeft)
536 BytesLeft -= BytesWritten;
537 CurPtr += BytesWritten;
542 if (
auto *CE = dyn_cast<ConstantExpr>(
C)) {
543 if (
CE->getOpcode() == Instruction::IntToPtr &&
544 CE->getOperand(0)->getType() ==
DL.getIntPtrType(
CE->getType())) {
545 return ReadDataFromGlobal(
CE->getOperand(0), ByteOffset, CurPtr,
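// Fragment of FoldReinterpretLoadFromConst: loads of at most 32 bytes are
// satisfied from the byte buffer filled by ReadDataFromGlobal and reassembled
// below in endian order.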
  if (isa<ScalableVectorType>(LoadTy))
    // ...
  auto *IntType = dyn_cast<IntegerType>(LoadTy);
  // If this isn't an integer load, map it to an equivalently-sized integer
  // type first.
  // ...
      DL.getTypeSizeInBits(LoadTy).getFixedValue());
  // ...
  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    // ...

  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    // ...
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  // ...
  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;
  // ...
  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    // ...

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
  if (NBytes > UINT16_MAX)
    // ...
  unsigned char *CurPtr = RawBytes.data();
  // ...
  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    // ...
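// Fragment of getConstantAtOffset: descends through aggregate initializers
// using DataLayout-computed indices until the element starting exactly at the
// byte offset is found.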
  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    // ...
  if (!Offset.isZero() || !Indices[0].isZero())
    // ...
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      // ...
    C = C->getAggregateElement(Index.getZExtValue());
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

// Fragment of ConstantFoldLoadFromConstPtr:
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    // ...
  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      // ...

// Fragment of ConstantFoldLoadFromUniformValue:
  if (isa<PoisonValue>(C))
    // ...
  if (isa<UndefValue>(C))
    // ...
  // If padding is needed when storing C to memory, it isn't uniform.
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
    return nullptr;
  // ...
  if (C->isAllOnesValue() &&
      // ...
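// Fragment of the symbolic binop evaluation: known-bits reasoning can fold
// 'and' when the masked bits are already zero, and pointer subtraction of two
// offsets into the same global folds to a constant.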
  if (Opc == Instruction::And) {
    // ...
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
    // ...
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into
  // a constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    // ...
    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
    // ...
  }
  // (parameter list of CastGEPIndices continues)
                                std::optional<ConstantRange> InRange,
  // ...
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  // ...
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    // ...
            SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      // ...
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
    }
  }
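// Fragment of SymbolicallyEvaluateGEP: a constant GEP is folded by
// accumulating every index into one APInt offset and rebuilding a canonical
// GEP over the stripped base pointer.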
  Type *SrcElemTy = GEP->getSourceElementType();
  // ...
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
    return C;

  // ...
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
  // ...
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]) || !Ops[i]->getType()->isIntegerTy())
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  // ...
      DL.getIndexedOffsetInType(
          // ...
  std::optional<ConstantRange> InRange = GEP->getInRange();
  // ...

  // If this is a GEP of a GEP, fold it all into a single GEP.
  bool Overflow = false;
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    NW &= GEP->getNoWrapFlags();
    // ...
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    // ...
    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    // ...
  }

  // If the base is a literal integer (via inttoptr), fold the GEP to the
  // resulting integer value cast to the pointer type.
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        // ...
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    // ...
  }

  // ...
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
    // ...
  // ...
      ConstantInt::get(Ctx, Offset), NW,
      // ...
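// Fragment of ConstantFoldInstOperandsImpl: the central dispatch that folds
// an instruction or constant-expression opcode over already-constant
// operands.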
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       // ...
                                       bool AllowNonDeterministic) {
  // ...
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    // Handle floating point instructions separately to account for denormals.
    if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
      // ...
                                       AllowNonDeterministic);
    }
    // ...

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    // ...
                          GEP->getNoWrapFlags(),
                          // ...
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    // ...
  }
  case Instruction::Freeze:
    // ...
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      // ...
                                AllowNonDeterministic);
    }
    // ...
  case Instruction::Select:
    // ...
  case Instruction::ExtractElement:
    // ...
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    // ...
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      // ...
  }
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  // ...
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    // ...
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    // ...
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI,
            /*AllowNonDeterministic=*/true))
      return Res;
    // ...
  }

  assert(isa<ConstantVector>(C));
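// Fragment of ConstantFoldInstruction: a PHI folds only if every incoming
// value folds to one common constant; any other instruction folds once all
// of its operands are constant.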
  if (auto *PN = dyn_cast<PHINode>(I)) {
    // ...
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value differs from the constant seen so far, give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
    // ...
  }

  // If the operand list is not all constants, give up.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  // ...
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // ...
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    // ...
  }
  // ...

// Fragment of ConstantFoldConstant:
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);

// Fragment of ConstantFoldInstOperands:
                                         // ...
                                         bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
}
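// Fragment of ConstantFoldCompareInstOperands: pointer comparisons fold by
// looking through inttoptr/ptrtoint casts and, failing that, by comparing
// stripped base pointers plus accumulated offsets.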
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
      // ...
    }

    if (CE0->getOpcode() == Instruction::PtrToInt) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
      if (CE0->getType() == IntPtrTy) {
        // ...
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
          // ...
        }

        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // ...
    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);
    // ...
    APInt Offset1(IndexWidth, 0);
    // ...
    if (Stripped0 == Stripped1)
      // ...
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression but LHS isn't, swap operands and retry.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    // ...
  }
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    // ...

// Fragment of flushDenormalConstant:
  // ...
    return ConstantFP::get(Ty->getContext(), APF);
  // ...
  return ConstantFP::get(
      // ...

// Fragment of flushDenormalConstantFP:
  // ...
                               IsOutput ? Mode.Output : Mode.Input);
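// Denormal-handling fragments: depending on the denormal-fp-math mode in
// effect, constant FP inputs and folded results may be flushed to
// sign-preserving zero before use.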
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Operand))
    return flushDenormalConstantFP(CFP, Inst, IsOutput);

  if (isa<ConstantAggregateZero, UndefValue, ConstantExpr>(Operand))
    return Operand;

  VectorType *VecTy = dyn_cast<VectorType>(Ty);
  // ...
  if (const auto *CV = dyn_cast<ConstantVector>(Operand)) {
    // ...
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      // ...
      if (isa<UndefValue>(Element)) {
        // ...
      }

      ConstantFP *CFP = dyn_cast<ConstantFP>(Element);
      // ...
    }
    // ...
  }

  if (const auto *CDV = dyn_cast<ConstantDataVector>(Operand)) {
    // ...
    for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
      const APFloat &Elt = CDV->getElementAsAPFloat(I);
      // ...
        NewElts.push_back(ConstantFP::get(Ty, Elt));
      // ...
    }
    // ...
  }
// Fragment of ConstantFoldFPInstOperands:
                                          bool AllowNonDeterministic) {
  // ...
  // Folding under fast-math flags can produce non-deterministic results.
  if (!AllowNonDeterministic)
    if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
      if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
          FP->hasAllowContract() || FP->hasAllowReciprocal())
        return nullptr;
  // ...
  // The precise NaN bit pattern can be non-deterministic.
  if (!AllowNonDeterministic && C->isNaN())
    return nullptr;
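// Fragment of ConstantFoldCastOperand: ptrtoint/inttoptr round-trips and
// "gep null, X" patterns fold when the intermediate integer is wide enough to
// preserve the pointer value.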
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // ...
                                   DL.getIntPtrType(CE->getType()),
                                   // ...
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // ...
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              // ...
          }
        }
      }
      // ...
    }
    break;
  case Instruction::IntToPtr:
    // ...
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          // ...
        }
      }
    }
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // ...
  case Instruction::BitCast:
    // ...

// Fragment of ConstantFoldIntegerCast:
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;
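// Fragment of canConstantFoldCallTo: a whitelist of intrinsics (and, further
// below, libm-style external names) that the folder knows how to evaluate at
// compile time.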
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not depend on the FP environment can be folded even in
  // strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::scmp:
  case Intrinsic::ucmp:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // ...
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
  case Intrinsic::amdgcn_s_wqm:
  case Intrinsic::amdgcn_s_quadmask:
  case Intrinsic::amdgcn_s_bitreplicate:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // ...
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;

  // Floating-point operations cannot be folded in strictfp functions in the
  // general case; they can be folded only if the FP environment is unchanged.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::exp10:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::sincos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::ldexp:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::frexp:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();
  // NVVM float/double to int32/uint32 conversions.
  case Intrinsic::nvvm_f2i_rm:
  case Intrinsic::nvvm_f2i_rn:
  case Intrinsic::nvvm_f2i_rp:
  case Intrinsic::nvvm_f2i_rz:
  case Intrinsic::nvvm_f2i_rm_ftz:
  case Intrinsic::nvvm_f2i_rn_ftz:
  case Intrinsic::nvvm_f2i_rp_ftz:
  case Intrinsic::nvvm_f2i_rz_ftz:
  case Intrinsic::nvvm_f2ui_rm:
  case Intrinsic::nvvm_f2ui_rn:
  case Intrinsic::nvvm_f2ui_rp:
  case Intrinsic::nvvm_f2ui_rz:
  case Intrinsic::nvvm_f2ui_rm_ftz:
  case Intrinsic::nvvm_f2ui_rn_ftz:
  case Intrinsic::nvvm_f2ui_rp_ftz:
  case Intrinsic::nvvm_f2ui_rz_ftz:
  case Intrinsic::nvvm_d2i_rm:
  case Intrinsic::nvvm_d2i_rn:
  case Intrinsic::nvvm_d2i_rp:
  case Intrinsic::nvvm_d2i_rz:
  case Intrinsic::nvvm_d2ui_rm:
  case Intrinsic::nvvm_d2ui_rn:
  case Intrinsic::nvvm_d2ui_rp:
  case Intrinsic::nvvm_d2ui_rz:
  // ...
  // NVVM float/double to int64/uint64 conversions.
  case Intrinsic::nvvm_f2ll_rm:
  case Intrinsic::nvvm_f2ll_rn:
  case Intrinsic::nvvm_f2ll_rp:
  case Intrinsic::nvvm_f2ll_rz:
  case Intrinsic::nvvm_f2ll_rm_ftz:
  case Intrinsic::nvvm_f2ll_rn_ftz:
  case Intrinsic::nvvm_f2ll_rp_ftz:
  case Intrinsic::nvvm_f2ll_rz_ftz:
  case Intrinsic::nvvm_f2ull_rm:
  case Intrinsic::nvvm_f2ull_rn:
  case Intrinsic::nvvm_f2ull_rp:
  case Intrinsic::nvvm_f2ull_rz:
  case Intrinsic::nvvm_f2ull_rm_ftz:
  case Intrinsic::nvvm_f2ull_rn_ftz:
  case Intrinsic::nvvm_f2ull_rp_ftz:
  case Intrinsic::nvvm_f2ull_rz_ftz:
  case Intrinsic::nvvm_d2ll_rm:
  case Intrinsic::nvvm_d2ll_rn:
  case Intrinsic::nvvm_d2ll_rp:
  case Intrinsic::nvvm_d2ll_rz:
  case Intrinsic::nvvm_d2ull_rm:
  case Intrinsic::nvvm_d2ull_rn:
  case Intrinsic::nvvm_d2ull_rp:
  case Intrinsic::nvvm_d2ull_rz:
  // ...
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  // ...
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
  // ...
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
  // ...
  if (!F->hasName() || Call->isStrictFP())
    return false;

  // ... (switch on the first character of the name)
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           // ...
  case 'e':
    return Name == "exp" || Name == "expf" || Name == "exp2" ||
           // ...
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           // ...
  case 'i':
    return Name == "ilogb" || Name == "ilogbf";
  case 'l':
    return Name == "log" || Name == "logf" || Name == "logl" ||
           Name == "log2" || Name == "log2f" || Name == "log10" ||
           Name == "log10f" || Name == "logb" || Name == "logbf" ||
           Name == "log1p" || Name == "log1pf";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           // ...
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  // ...

  // Check the "__<name>_finite" variants.
  if (Name.size() < 12 || Name[1] != '_')
    return false;
  // ... (switch on Name[2])
  case 'a':
    return Name == "__acos_finite" || Name == "__acosf_finite" ||
           Name == "__asin_finite" || Name == "__asinf_finite" ||
           Name == "__atan2_finite" || Name == "__atan2f_finite";
  case 'c':
    return Name == "__cosh_finite" || Name == "__coshf_finite";
  case 'e':
    return Name == "__exp_finite" || Name == "__expf_finite" ||
           Name == "__exp2_finite" || Name == "__exp2f_finite";
  case 'l':
    return Name == "__log_finite" || Name == "__logf_finite" ||
           Name == "__log10_finite" || Name == "__log10f_finite";
  case 'p':
    return Name == "__pow_finite" || Name == "__powf_finite";
  case 's':
    return Name == "__sinh_finite" || Name == "__sinhf_finite";
// Tail of GetConstantFoldFPValue:
  APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
  return ConstantFP::get(Ty->getContext(), APF);
}

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
  // ...
  return ConstantFP::get(Ty, V);
}
#endif

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

// Body of ConstantFoldFP:
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
                            Type *Ty) {
  llvm_fenv_clearexcept();
  float128 Result = NativeFP(V.convertToQuad());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue128(Result, Ty);
}
#endif

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}
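// Fragment of constantFoldVectorReduce: integer reductions fold by
// accumulating the vector's elements with the matching APInt operation.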
  if (isa<ConstantAggregateZero>(Op))
    // ...
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
    // ...
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (/* each remaining element I */) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::vector_reduce_and:
      Acc = Acc & X;
      break;
    case Intrinsic::vector_reduce_or:
      Acc = Acc | X;
      break;
    case Intrinsic::vector_reduce_xor:
      Acc = Acc ^ X;
      break;
    case Intrinsic::vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
// Fragment of getValueAsDouble:
  Type *Ty = Op->getType();
  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();
  // ...
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  // ...

// Fragment of getConstIntOrUndef:
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    // ...
  }
// Fragment of mayFoldConstrained:
  if (St == APFloat::opStatus::opOK)
    return true;
  // If evaluation raised an FP exception, the result may depend on rounding.
  if (ORM && *ORM == RoundingMode::Dynamic)
    return false;
  // If FP exceptions are ignored, fold the call even if an exception is
  // raised.
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
    return true;
  // ...

// Fragment of getEvaluationRoundingMode:
  if (!ORM || *ORM == RoundingMode::Dynamic)
    return RoundingMode::NearestTiesToEven;
  // ...

// Fragments of the denormal-flush and canonicalize helpers:
  return ConstantFP::get(
      // ...
  if (Src.isNormal() || Src.isInfinity())
    return ConstantFP::get(CI->getContext(), Src);
  // ...
  return ConstantFP::get(CI->getContext(), Src);
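// Fragment of ConstantFoldScalarCall1: one-operand intrinsic folding, from
// is_constant and poison/undef special cases down to libm-backed evaluation.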
  if (IntrinsicID == Intrinsic::is_constant) {
    // Only return true for manifest constants, not those that depend on
    // constants with unknowable values such as GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      // ...
  }

  if (isa<PoisonValue>(Operands[0])) {
    // ...
    if (IntrinsicID == Intrinsic::canonicalize)
      return PoisonValue::get(Ty);
  }

  if (isa<UndefValue>(Operands[0])) {
    // cos(undef), ctpop(undef), fptoui.sat/fptosi.sat(undef) and
    // canonicalize(undef) all fold to zero.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in address space 0.
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // ...
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      // ...
    }
  }
  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      // ...
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }
    // ...
    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
      // ...
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);
      // ...
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already maps out-of-range inputs to the saturated
      // results.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
    // ...
    if (IntrinsicID == Intrinsic::log) {
      float128 Result = logf128(Op->getValueAPF().convertToQuad());
      return GetConstantFoldFPValue128(Result, Ty);
    }
    // ... (fold the logl libcall the same way)
    if (/* ... */ Fp128Func == LibFunc_logl)
      return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
#endif
    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // fract(x) = min(x - floor(x), nextafter(1.0, 0.0)), per the v_fract
      // instruction semantics.
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      // ...
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown=*/true);
      // ...
    }
    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.
    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      // ...
      if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
          St == APFloat::opInexact) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        // ...
      } else if (U.isSignaling()) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        // ...
      }
      // ...
    }
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::nvvm_f2i_rm:
    case Intrinsic::nvvm_f2i_rn:
    case Intrinsic::nvvm_f2i_rp:
    case Intrinsic::nvvm_f2i_rz:
    case Intrinsic::nvvm_f2i_rm_ftz:
    case Intrinsic::nvvm_f2i_rn_ftz:
    case Intrinsic::nvvm_f2i_rp_ftz:
    case Intrinsic::nvvm_f2i_rz_ftz:
    case Intrinsic::nvvm_f2ui_rm:
    case Intrinsic::nvvm_f2ui_rn:
    case Intrinsic::nvvm_f2ui_rp:
    case Intrinsic::nvvm_f2ui_rz:
    case Intrinsic::nvvm_f2ui_rm_ftz:
    case Intrinsic::nvvm_f2ui_rn_ftz:
    case Intrinsic::nvvm_f2ui_rp_ftz:
    case Intrinsic::nvvm_f2ui_rz_ftz:
    case Intrinsic::nvvm_d2i_rm:
    case Intrinsic::nvvm_d2i_rn:
    case Intrinsic::nvvm_d2i_rp:
    case Intrinsic::nvvm_d2i_rz:
    case Intrinsic::nvvm_d2ui_rm:
    case Intrinsic::nvvm_d2ui_rn:
    case Intrinsic::nvvm_d2ui_rp:
    case Intrinsic::nvvm_d2ui_rz:
    case Intrinsic::nvvm_f2ll_rm:
    case Intrinsic::nvvm_f2ll_rn:
    case Intrinsic::nvvm_f2ll_rp:
    case Intrinsic::nvvm_f2ll_rz:
    case Intrinsic::nvvm_f2ll_rm_ftz:
    case Intrinsic::nvvm_f2ll_rn_ftz:
    case Intrinsic::nvvm_f2ll_rp_ftz:
    case Intrinsic::nvvm_f2ll_rz_ftz:
    case Intrinsic::nvvm_f2ull_rm:
    case Intrinsic::nvvm_f2ull_rn:
    case Intrinsic::nvvm_f2ull_rp:
    case Intrinsic::nvvm_f2ull_rz:
    case Intrinsic::nvvm_f2ull_rm_ftz:
    case Intrinsic::nvvm_f2ull_rn_ftz:
    case Intrinsic::nvvm_f2ull_rp_ftz:
    case Intrinsic::nvvm_f2ull_rz_ftz:
    case Intrinsic::nvvm_d2ll_rm:
    case Intrinsic::nvvm_d2ll_rn:
    case Intrinsic::nvvm_d2ll_rp:
    case Intrinsic::nvvm_d2ll_rz:
    case Intrinsic::nvvm_d2ull_rm:
    case Intrinsic::nvvm_d2ull_rn:
    case Intrinsic::nvvm_d2ull_rp:
    case Intrinsic::nvvm_d2ull_rz: {
      // In float-to-integer conversion, NaN inputs are converted to 0.
      if (U.isNaN())
        return ConstantInt::get(Ty, 0);
      // ... (rounding mode, signedness, and FTZ behavior are derived from
      // the intrinsic ID)
      auto FloatToRound = IsFTZ ? FTZPreserveSign(U) : U;

      bool IsExact = false;
      APFloat::opStatus Status =
          FloatToRound.convertToInteger(ResInt, RMode, &IsExact);

      if (Status != APFloat::opInvalidOp)
        return ConstantInt::get(Ty, ResInt);
      return nullptr;
    }
    }
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::exp10:
      // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside
        // [-256, 256] differently; bail out on this rare case.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = {0.0, 1.0, 0.0, -1.0};
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      } else {
        // ...
      }
      return GetConstantFoldFPValue(V, Ty);
    }
    // ... (switch on the recognized LibFunc)
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    // ... (atan family)
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    // ... (ceil family)
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    // ... (cos family)
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    // ... (exp10 family)
      if (TLI->has(Func)) {
        // ...
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(log2, APF, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_ilogb:
    case LibFunc_ilogbf:
      // ...
        return ConstantInt::get(Ty, ilogb(APF), true);
      break;
    // ... (logb family)
        return ConstantFoldFP(logb, APF, Ty);
      break;
    case LibFunc_log1p:
    case LibFunc_log1pf:
      // ...
        return ConstantFoldFP(log1p, APF, Ty);
      break;
    // ... (erf family)
        return ConstantFoldFP(erf, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    // ... (rint family)
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    // ... (sin family)
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    // ... (sqrt family)
      if (TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    // ... (tan family)
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    // ... (tanh family)
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().popcount());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());
      // ...
      // The conversion is always precise.
      assert(status != APFloat::opInexact && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }

    case Intrinsic::amdgcn_s_wqm: {
      // Whole quad mode: set all four lanes of each quad if any lane is set.
      uint64_t Val = Op->getZExtValue();
      Val |= (Val & 0x5555555555555555ULL) << 1 |
             ((Val >> 1) & 0x5555555555555555ULL);
      Val |= (Val & 0x3333333333333333ULL) << 2 |
             ((Val >> 2) & 0x3333333333333333ULL);
      return ConstantInt::get(Ty, Val);
    }

    case Intrinsic::amdgcn_s_quadmask: {
      uint64_t Val = Op->getZExtValue();
      uint64_t QuadMask = 0;
      for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
        if (!(Val & 0xF))
          continue;
        QuadMask |= (1ULL << I);
      }
      return ConstantInt::get(Ty, QuadMask);
    }

    case Intrinsic::amdgcn_s_bitreplicate: {
      // Spread each of the low 32 bits into two adjacent output bits.
      uint64_t Val = Op->getZExtValue();
      Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
      Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
      Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
      Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
      Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
      Val = Val | Val << 1;
      return ConstantInt::get(Ty, Val);
    }
    // ...
    }
  }
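// Fragment of the scalar-typed vector paths in ConstantFoldScalarCall1:
// vector reductions and the x86 SSE scalar conversions fold through element 0
// of their vector operand.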
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    // ...
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    // ...
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned=*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned=*/true);
      break;
    }
  }

// Fragment of evaluateCompare (constrained FP comparisons):
  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  // ...
  if (FCmp->isSignaling()) {
    if (Op1.isNaN() || Op2.isNaN())
      St = APFloat::opInvalidOp;
  } else {
    if (Op1.isSignaling() || Op2.isSignaling())
      St = APFloat::opInvalidOp;
  }
  // ...
    return ConstantInt::get(Call->getType()->getScalarType(), Result);
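// Fragments of ConstantFoldLibCall2 and ConstantFoldIntrinsicCall2: the
// two-operand folding paths for libm calls and FP intrinsics.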
  const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
  if (!Op1)
    return nullptr;

  const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
  if (!Op2)
    return nullptr;

  const APFloat &Op1V = Op1->getValueAPF();
  const APFloat &Op2V = Op2->getValueAPF();

  // ... (switch on the recognized LibFunc)
  case LibFunc_pow_finite:
  case LibFunc_powf_finite:
    if (TLI->has(Func))
      return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
    break;
  // ... (fmod family)
    if (TLI->has(Func)) {
      APFloat V = Op1->getValueAPF();
      if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
        return ConstantFP::get(Ty->getContext(), V);
    }
    break;
  case LibFunc_remainder:
  case LibFunc_remainderf:
    if (TLI->has(Func)) {
      APFloat V = Op1->getValueAPF();
      if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
        return ConstantFP::get(Ty->getContext(), V);
    }
    break;
  case LibFunc_atan2:
  case LibFunc_atan2f:
    // ...
  case LibFunc_atan2_finite:
  case LibFunc_atan2f_finite:
    if (TLI->has(Func))
      return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
    break;
  bool IsOp0Undef = isa<UndefValue>(Operands[0]);
  bool IsOp1Undef = isa<UndefValue>(Operands[1]);
  switch (IntrinsicID) {
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
    // If one argument is undef, return the other argument.
    if (IsOp0Undef)
      return Operands[1];
    if (IsOp1Undef)
      return Operands[0];
    break;
  }
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr =
              dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
        // ...
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fsub:
          St = Res.subtract(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fmul:
          St = Res.multiply(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_frem:
          St = Res.mod(Op2V);
          break;
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        }
        // ...
          return ConstantFP::get(Ty->getContext(), Res);
        // ...
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::copysign:
        // ...
      case Intrinsic::minnum:
        // ...
      case Intrinsic::maxnum:
        // ...
      case Intrinsic::minimum:
        // ...
      case Intrinsic::maximum:
        // ...
      }
      // ...
      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        // The legacy behaviour is that multiplying +/-0.0 by anything, even
        // NaN or infinity, gives +0.0.
        // ...
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }
      // ...
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::ldexp: {
        return ConstantFP::get(
            Ty->getContext(),
            scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
      }
      case Intrinsic::is_fpclass: {
        // ...
        return ConstantInt::get(Ty, Result);
      }
      case Intrinsic::powi: {
        int Exp = static_cast<int>(Op2C->getSExtValue());
        // ...
          Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
                      // ...
          return ConstantFP::get(Ty->getContext(), Res);
        // ...
      }
      }
    }
  }
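// Fragment of the two-integer-operand intrinsic folds: min/max, scmp/ucmp,
// the *_with_overflow and saturating families, cttz/ctlz, and abs.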
  const APInt *C0, *C1;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1))
    return nullptr;

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
    // ...
    return ConstantInt::get(
        // ...

  case Intrinsic::scmp:
  case Intrinsic::ucmp: {
    // ...
      return ConstantInt::get(Ty, 0);

    int Res;
    if (IntrinsicID == Intrinsic::scmp)
      Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
    else
      Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
    return ConstantInt::get(Ty, Res, /*IsSigned=*/true);
  }
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // ...
    [[fallthrough]];
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
    // ...
      return ConstantStruct::get(
          cast<StructType>(Ty),
          // ...
    [[fallthrough]];
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    // ...
    APInt Res;
    bool Overflow;
    switch (IntrinsicID) {
    default:
      llvm_unreachable("Invalid case");
    case Intrinsic::sadd_with_overflow:
      Res = C0->sadd_ov(*C1, Overflow);
      break;
    case Intrinsic::uadd_with_overflow:
      Res = C0->uadd_ov(*C1, Overflow);
      break;
    case Intrinsic::ssub_with_overflow:
      Res = C0->ssub_ov(*C1, Overflow);
      break;
    case Intrinsic::usub_with_overflow:
      Res = C0->usub_ov(*C1, Overflow);
      break;
    case Intrinsic::smul_with_overflow:
      Res = C0->smul_ov(*C1, Overflow);
      break;
    case Intrinsic::umul_with_overflow:
      Res = C0->umul_ov(*C1, Overflow);
      break;
    }
    // ...
  }
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    // ...
    if (IntrinsicID == Intrinsic::uadd_sat)
      return ConstantInt::get(Ty, C0->uadd_sat(*C1));
    return ConstantInt::get(Ty, C0->sadd_sat(*C1));
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    // ...
    if (IntrinsicID == Intrinsic::usub_sat)
      return ConstantInt::get(Ty, C0->usub_sat(*C1));
    return ConstantInt::get(Ty, C0->ssub_sat(*C1));
  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    assert(C1 && "Must be constant int");
    // ...
    if (IntrinsicID == Intrinsic::cttz)
      return ConstantInt::get(Ty, C0->countr_zero());
    return ConstantInt::get(Ty, C0->countl_zero());
  case Intrinsic::abs:
    assert(C1 && "Must be constant int");
    // ...
    return ConstantInt::get(Ty, C0->abs());
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
    return dyn_cast<Constant>(Operands[0]);
  }
  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for the default rounding mode (operand value 4).
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    // ...
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned=*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned=*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned=*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned=*/false);
      break;
    }
  }
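// Fragments of ConstantFoldAMDGCNCubeIntrinsic and
// ConstantFoldAMDGCNPermIntrinsic: cube-map coordinate selection and the
// v_perm byte shuffle are evaluated bit-exactly.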
  if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
    // ...
  }
  // ...
  switch (IntrinsicID) {
  default:
    // ...
  case Intrinsic::amdgcn_cubeid:
    // ...
  case Intrinsic::amdgcn_cubema:
    // ...
  case Intrinsic::amdgcn_cubesc:
    // ...
  case Intrinsic::amdgcn_cubetc:
    // ...
  }

// Fragment of ConstantFoldAMDGCNPermIntrinsic:
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  // ...
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    // ...
    const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
    // ...
    if (Sel < 8)
      B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
    else
      B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    // ...
    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    // ...

  return ConstantInt::get(Ty, Val);
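// Fragment of ConstantFoldScalarCall3: three-operand folds, including
// (constrained) fma/fmuladd, fixed-point smul_fix[.sat], and the fshl/fshr
// funnel shifts.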
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          // ...
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  // ...
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }

        switch (IntrinsicID) {
        default:
          break;
        case Intrinsic::amdgcn_fma_legacy: {
          // ...
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    // ...
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // ...
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      // ...
    }
    // ...
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    // ... (the shift amount is taken modulo the bit width)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
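// Dispatch fragments: ConstantFoldScalarCall routes 1-, 2-, and 3-operand
// calls, while the fixed-vector, scalable-vector, and struct variants fold
// lane by lane and recombine the results.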
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2) {
    if (Constant *FoldedLibCall =
            // ...
      return FoldedLibCall;
    }
    return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
  }

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
static Constant *ConstantFoldFixedVectorCall(
    // ...
  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    // ... (walk the mask lane by lane)
      auto *MaskElt = Mask->getAggregateElement(I);
      // ...
      auto *PassthruElt = Passthru->getAggregateElement(I);
      // ...
      if (isa<UndefValue>(MaskElt)) {
        // ...
      }
      if (MaskElt->isNullValue()) {
        // ...
      } else if (MaskElt->isOneValue()) {
        // ...
      }
    // ...
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      // ...
      for (unsigned i = 0; i < Lanes; i++) {
        // ...
      }
    }
    break;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    // ...
    uint64_t Limit = Op1->getZExtValue();
    // ...
    for (unsigned i = 0; i < Lanes; i++) {
      if (Base + i < Limit)
        // ...
    }
    // ...
  }
  // ...
  }

  // Otherwise, fold the call one lane at a time and rebuild the vector.
  for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
    // ...
  }
  // ...
      ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
static Constant *ConstantFoldScalableVectorCall(
    // ...
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;
    // ...
  }
  // ...
  }
  return nullptr;
}

static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);
  // ...
  return {Result0, Result1};
}
  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    // ...
    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      // ...
      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        // ...
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);
        // ...
      }
      // ...
    }

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
    // ...
  }
  case Intrinsic::sincos: {
    // ...
    auto ConstantFoldScalarSincosCall =
        [&](Constant *Op) -> std::pair<Constant *, Constant *> {
      Constant *SinResult =
          ConstantFoldScalarCall(Name, Intrinsic::sin, TyScalar, Op, TLI, Call);
      Constant *CosResult =
          ConstantFoldScalarCall(Name, Intrinsic::cos, TyScalar, Op, TLI, Call);
      return std::make_pair(SinResult, CosResult);
    };

    if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
      // ... (fold each lane)
        std::tie(SinResults[I], CosResults[I]) =
            ConstantFoldScalarSincosCall(Lane);
        if (!SinResults[I] || !CosResults[I])
          return nullptr;
      // ...
    }

    auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
    if (!SinResult || !CosResult)
      return nullptr;
    // ...
  }
  default:
    // ...
    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
  }
  return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
                                    dyn_cast_if_present<CallBase>(FMFSource));

// Fragment of ConstantFoldCall:
                                bool AllowNonDeterministic) {
  if (Call->isNoBuiltin())
    return nullptr;
  // ...
  Type *Ty = F->getReturnType();
  // ...
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        // ...

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        // ...

  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getDataLayout(), TLI, Call);

  // ...
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
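// Fragment of isMathLibCallNoop: removing a dead libm call is only legal when
// the call provably sets neither errno nor FP exceptions for its actual
// arguments.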
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  // ...

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      // ... (switch on the recognized LibFunc)
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
      // ...
        if (OpC->getType()->isDoubleTy())
          // ...
        if (OpC->getType()->isFloatTy())
          // ...
        break;
      // ...
        if (OpC->getType()->isDoubleTy())
          // ...
        if (OpC->getType()->isFloatTy())
          // ...
        break;
      // ...
        return !Op.isInfinity();
      case LibFunc_tan:
      case LibFunc_tanf: {
        // ...
        Type *Ty = OpC->getType();
        // ...
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }
      // ...
        if (OpC->getType()->isDoubleTy())
          // ...
        if (OpC->getType()->isFloatTy())
          // ...
        break;
      // ...
        return Op.isNaN() || Op.isZero() || !Op.isNegative();
      // ...
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    // ...
      case LibFunc_pow:
      case LibFunc_powf: {
        // ...
        return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
      }
      // ...
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        // ...
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // ...
    }
  }
  // ...

void TargetFolder::anchor() {}
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
static ConstantFP * flushDenormalConstant(Type *Ty, const APFloat &APF, DenormalMode::DenormalModeKind Mode)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
static ConstantFP * flushDenormalConstantFP(ConstantFP *CFP, const Instruction *Inst, bool IsOutput)
static DenormalMode getInstrDenormalMode(const Instruction *CtxI, Type *Ty)
Return the denormal mode that can be assumed when executing a floating point operation at CtxI.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file contains the definitions of the enumerations and flags associated with NVVM Intrinsics,...
This file defines the SmallVector class.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
bool isPosInfinity() const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
opStatus multiply(const APFloat &RHS, roundingMode RM)
float convertToFloat() const
Converts this APFloat to host float value.
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
bool isNegInfinity() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
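Several of the APFloat operations above are the primitives the scalar folder uses to evaluate FP arithmetic at compile time. A standalone illustration (foldedFloatBits is a hypothetical helper, not code from this file):

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"

// Evaluate 1.5 + 2.25 in double, narrow the result to float, and return
// its raw bit pattern, mirroring how folded FP results are rebuilt.
static llvm::APInt foldedFloatBits() {
  using namespace llvm;
  APFloat A(1.5), B(2.25);
  A.add(B, APFloat::rmNearestTiesToEven);       // A = 3.75, exact
  bool LosesInfo = false;
  A.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
            &LosesInfo);                        // exact, LosesInfo stays false
  return A.bitcastToAPInt();                    // 32-bit IEEE pattern
}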
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
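The overflow-aware variants above (sadd_ov, usub_ov, smul_ov, and friends) are what the folder leans on for llvm.*.with.overflow and the saturating intrinsics. A minimal sketch (signedAddOverflows is a hypothetical helper):

#include "llvm/ADT/APInt.h"

// Report whether a signed add of two same-width APInts would overflow.
static bool signedAddOverflows(const llvm::APInt &A, const llvm::APInt &B) {
  bool Overflow = false;
  (void)A.sadd_ov(B, Overflow); // result discarded; the flag is the answer
  return Overflow;
}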
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
bool isFPPredicate() const
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static bool isDesirableCastOp(unsigned Opcode)
Whether creating a constant expression for this cast is desirable.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
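The ConstantExpr factories above build (and, where possible, fold) constant expressions rather than instructions. A small sketch using the getGetElementPtr factory (byteOffsetIntoGlobal is a hypothetical helper):

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

// Form the constant expression &((i8 *)GV)[4] without any IR builder.
static llvm::Constant *byteOffsetIntoGlobal(llvm::GlobalVariable *GV) {
  using namespace llvm;
  LLVMContext &Ctx = GV->getContext();
  Constant *Off = ConstantInt::get(Type::getInt64Ty(Ctx), 4);
  return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), GV, Off);
}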
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
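getAggregateElement and getNullValue appear throughout the folder when taking vectors apart lane by lane. An illustrative combination (firstLaneOrZero is a hypothetical helper):

#include "llvm/IR/Constants.h"

// Pull lane 0 out of an aggregate constant, falling back to a zero
// scalar when the element is not extractable.
static llvm::Constant *firstLaneOrZero(llvm::Constant *Vec) {
  using namespace llvm;
  if (Constant *Elt = Vec->getAggregateElement(0u))
    return Elt;
  return Constant::getNullValue(Vec->getType()->getScalarType());
}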
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper around a function, giving a value that functionally represents the original function.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
This provides a helper for copying FMF from an instruction or setting specified flags.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags noUnsignedWrap()
bool hasNoUnsignedSignedWrap() const
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
PointerType * getType() const
Global values are always pointers.
const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
const Function * getFunction() const
Return the function this instruction belongs to.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
unsigned getElementContainingOffset(uint64_t FixedOffset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
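These two TargetLibraryInfo queries gate all of the libcall folding in this file: a call is only treated as a known math-library function if the name resolves to a LibFunc and that LibFunc is available on the target. A minimal guard in that spirit (isKnownLibCall is a hypothetical helper):

#include "llvm/Analysis/TargetLibraryInfo.h"

// True if TLI recognizes the name and the function is available.
static bool isKnownLibCall(llvm::StringRef Name,
                           const llvm::TargetLibraryInfo &TLI) {
  llvm::LibFunc LF;
  return TLI.getLibFunc(Name, LF) && TLI.has(LF);
}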
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFP128Ty() const
Return true if this is 'fp128'.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeID getTypeID() const
Return the type id for the type.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has a IEEE compatible layout as defined b...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
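The Type predicates above drive most of the early-out checks in the folder. A plausible guard built from them (isFoldableFPScalar is a hypothetical helper, not the file's exact predicate):

#include "llvm/IR/Type.h"

// Scalar, IEEE-like floating point only. Note isFloatingPointTy() is
// already scalar-only, so no separate vector check is needed.
static bool isFoldableFPScalar(llvm::Type *Ty) {
  return Ty->isFloatingPointTy() && Ty->isIEEELikeFPTy();
}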
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
LLVMContext & getContext() const
All values hold a context through their type.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
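The four APIntOps helpers above map directly onto the llvm.smin/smax/umin/umax intrinsics. An illustrative one-liner (foldSMax is a hypothetical helper):

#include "llvm/ADT/APInt.h"

// Fold llvm.smax on two constant integers of equal width.
static llvm::APInt foldSMax(const llvm::APInt &A, const llvm::APInt &B) {
  return llvm::APIntOps::smax(A, B);
}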
int ilogb(const IEEEFloat &Arg)
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
APFloat::roundingMode IntrinsicGetRoundingMode(Intrinsic::ID IntrinsicID)
bool IntrinsicShouldFTZ(Intrinsic::ID IntrinsicID)
bool IntrinsicConvertsToSignedInteger(Intrinsic::ID IntrinsicID)
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Constant * ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS, Constant *RHS, Type *Ty, Instruction *FMFSource)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - try to cast constant to destination type returning null if unsuccess...
static double log2(double V)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
unsigned getPointerAddressSpace(const Type *T)
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldCompareInstruction(CmpInst::Predicate Predicate, Constant *C1, Constant *C2)
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating point constant according to the denormal mode set in the instruction's parent func...
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
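The entry points above are the public surface of this file. A hedged driver sketch for the binary-op path (foldConstantAdd is a hypothetical helper):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"

// Fold "add i32 2, 3" to the constant 5 without making an instruction.
static llvm::Constant *foldConstantAdd(llvm::LLVMContext &Ctx,
                                       const llvm::DataLayout &DL) {
  using namespace llvm;
  Type *I32 = Type::getInt32Ty(Ctx);
  return ConstantFoldBinaryOpOperands(Instruction::Add,
                                      ConstantInt::get(I32, 2),
                                      ConstantInt::get(I32, 3), DL);
}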
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getDynamic()
static constexpr DenormalMode getIEEE()
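The DenormalMode kinds above determine when FlushFPConstant may replace a subnormal operand or result with zero. A check in that spirit (inputTreatedAsZero is a hypothetical helper, not the file's exact logic):

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/FloatingPointMode.h"

// Under PreserveSign or PositiveZero input flushing, a subnormal
// operand may be treated as zero before folding.
static bool inputTreatedAsZero(const llvm::APFloat &V,
                               llvm::DenormalMode Mode) {
  using llvm::DenormalMode;
  return V.isDenormal() && (Mode.Input == DenormalMode::PreserveSign ||
                            Mode.Input == DenormalMode::PositiveZero);
}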
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.