#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
for (unsigned i = 0; i != NumSrcElts; ++i) {
  Constant *Element;
  if (DL.isLittleEndian())
    Element = C->getAggregateElement(NumSrcElts - i - 1);
  else
    Element = C->getAggregateElement(i);

  if (Element && isa<UndefValue>(Element)) {
    Result <<= BitShift;
    continue;
  }

  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
  // ...
  Result <<= BitShift;
  Result |= ElementCI->getValue().zext(Result.getBitWidth());
}
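// Annotation (not in the original source): on a little-endian target the
// lowest-indexed vector element supplies the least significant bits of the
// result, so the loop walks elements highest-index-first while shifting left.
// E.g. bitcasting <2 x i8> <i8 0x34, i8 0x12> to i16 yields 0x1234 on
// little-endian but 0x3412 on big-endian.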
106 "Invalid constantexpr bitcast!");
112 if (
auto *VTy = dyn_cast<VectorType>(
C->getType())) {
115 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116 Type *SrcEltTy = VTy->getElementType();
129 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
130 SrcEltTy, NumSrcElts,
DL))
133 if (isa<IntegerType>(DestTy))
134 return ConstantInt::get(DestTy, Result);
auto *DestVTy = dyn_cast<VectorType>(DestTy);
// ...
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
  // ...
}
// ...
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
  // ...

unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
if (NumDstElt == NumSrcElt)
  // ...

Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
Type *DstEltTy = DestVTy->getElementType();
// ...
if (!isa<ConstantVector>(C) && !isa<ConstantDataVector>(C))
  // ...

bool isLittleEndian = DL.isLittleEndian();
if (NumDstElt < NumSrcElt) {
  // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
  unsigned Ratio = NumSrcElt / NumDstElt;
  // ...
  for (unsigned i = 0; i != NumDstElt; ++i) {
    // ...
    unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      Constant *Src = C->getAggregateElement(SrcElt++);
      if (Src && isa<UndefValue>(Src))
        Src = Constant::getNullValue(
            cast<VectorType>(C->getType())->getElementType());
      else
        Src = dyn_cast_or_null<ConstantInt>(Src);
      // ...
      assert(Src && "Constant folding cannot fail on plain integers");

      // Shift it to the right place, depending on endianness.
      Src = ConstantFoldBinaryOpOperands(
          Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
          DL);
      assert(Src && "Constant folding cannot fail on plain integers");

      ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

      // Mix it in.
      // ...
      assert(Elt && "Constant folding cannot fail on plain integers");
    }
  }
unsigned Ratio = NumDstElt / NumSrcElt;
unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
// ...
for (unsigned i = 0; i != NumSrcElt; ++i) {
  auto *Element = C->getAggregateElement(i);
  // ...
  if (isa<UndefValue>(Element)) {
    // ...
  }

  auto *Src = dyn_cast<ConstantInt>(Element);
  // ...
  unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
  for (unsigned j = 0; j != Ratio; ++j) {
    // Shift the piece of the value into the right place, depending on
    // endianness.
    APInt Elt = Src->getValue().lshr(ShiftAmt);
    ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
    // ...
    Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
  }
}
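// Annotation (not in the original source): the two branches above re-chunk
// the constant's bits. NumDstElt < NumSrcElt packs several narrow elements
// into one wide one (e.g. <4 x i32> -> <2 x i64>); NumDstElt > NumSrcElt
// slices each wide element into several narrow ones. In both directions
// ShiftAmt starts at 0 on little-endian targets and at the top chunk on
// big-endian, so the in-memory byte order is preserved.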
if ((GV = dyn_cast<GlobalValue>(C))) {
  // ...
}

if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
  // ...
  *DSOEquiv = FoundDSOEquiv;
  GV = FoundDSOEquiv->getGlobalValue();
  // ...
}

// Otherwise, if this isn't a constant expression, we can't fold it.
auto *CE = dyn_cast<ConstantExpr>(C);
if (!CE)
  return false;

// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
    CE->getOpcode() == Instruction::BitCast)
  // ...

auto *GEP = dyn_cast<GEPOperator>(CE);
// ...
unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
// ...
if (!GEP->accumulateConstantOffset(DL, TmpOffset))
  // ...
Type *SrcTy = C->getType();
// ...
TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
if (!TypeSize::isKnownGE(SrcSize, DestSize))
  return nullptr;
// ...
if (SrcSize == DestSize &&
    /* ... */) {
  // ...
    Cast = Instruction::IntToPtr;
  // ...
    Cast = Instruction::PtrToInt;
  // ...
}
// ...
do {
  ElemC = C->getAggregateElement(Elem++);
} while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
// ...
if (auto *VT = dyn_cast<VectorType>(SrcTy))
  if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
    // ...

C = C->getAggregateElement(0u);
assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
       "Out of range access");
// ...
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
  return true;

if (auto *CI = dyn_cast<ConstantInt>(C)) {
  if ((CI->getBitWidth() & 7) != 0)
    return false;
  const APInt &Val = CI->getValue();
  unsigned IntBytes = unsigned(CI->getBitWidth() / 8);

  for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
    unsigned n = ByteOffset;
    if (!DL.isLittleEndian())
      n = IntBytes - n - 1;
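    // Annotation (not in the original source): `n` is the byte index inside
    // the integer's storage. On big-endian targets byte 0 of the value lives
    // at the highest offset, hence the mirror n = IntBytes - n - 1. For a
    // little-endian i32 0x11223344 the bytes come out as 44 33 22 11.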
if (auto *CFP = dyn_cast<ConstantFP>(C)) {
  if (CFP->getType()->isDoubleTy()) {
    C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
    return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
  }
  if (CFP->getType()->isFloatTy()) {
    C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
    return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
  }
  if (CFP->getType()->isHalfTy()) {
    C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
    return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
  }
  return false;
}
if (auto *CS = dyn_cast<ConstantStruct>(C)) {
  // ...
  ByteOffset -= CurEltOffset;
  // ...
  uint64_t EltSize =
      DL.getTypeAllocSize(CS->getOperand(Index)->getType());

  if (ByteOffset < EltSize &&
      !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                          BytesLeft, DL))
    return false;
  // ...
  if (Index == CS->getType()->getNumElements())
    return true;
  // ...
  if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
    return true;

  // Move to the next element of the struct.
  CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
  BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
  // ...
  CurEltOffset = NextEltOffset;
}
if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
    isa<ConstantDataSequential>(C)) {
  uint64_t NumElts, EltSize;
  Type *EltTy;
  if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
    NumElts = AT->getNumElements();
    EltTy = AT->getElementType();
    EltSize = DL.getTypeAllocSize(EltTy);
  } else {
    NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
    EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    // ...
    if (!DL.typeSizeEqualsStoreSize(EltTy))
      return false;
    EltSize = DL.getTypeStoreSize(EltTy);
  }
  // ...
  if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                          BytesLeft, DL))
    return false;
  // ...
  assert(BytesWritten <= EltSize && "Not indexing into this element?");
  if (BytesWritten >= BytesLeft)
    return true;
  // ...
  BytesLeft -= BytesWritten;
  CurPtr += BytesWritten;
}
if (auto *CE = dyn_cast<ConstantExpr>(C)) {
  if (CE->getOpcode() == Instruction::IntToPtr &&
      CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
    return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                              BytesLeft, DL);
  }
}
// ...
if (isa<ScalableVectorType>(LoadTy))
  return nullptr;

auto *IntType = dyn_cast<IntegerType>(LoadTy);

if (!IntType) {
  // Try folding a non-integer load as an integer load of the same width and
  // bitcasting the result back.
  // ...
  Type *MapTy = Type::getIntNTy(
      C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedValue());
  // ...
}

unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0)
  return nullptr;

// If we're not accessing anything in this constant, the result is undefined.
if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
  // ...

TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
// ...
unsigned char RawBytes[32] = {0};
unsigned char *CurPtr = RawBytes;
unsigned BytesLeft = BytesLoaded;
// ...
if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
  return nullptr;

APInt ResultVal = APInt(IntType->getBitWidth(), 0);
if (DL.isLittleEndian()) {
  ResultVal = RawBytes[BytesLoaded - 1];
  for (unsigned i = 1; i != BytesLoaded; ++i) {
    ResultVal <<= 8;
    ResultVal |= RawBytes[BytesLoaded - 1 - i];
  }
} else {
  ResultVal = RawBytes[0];
  for (unsigned i = 1; i != BytesLoaded; ++i) {
    ResultVal <<= 8;
    ResultVal |= RawBytes[i];
  }
}

return ConstantInt::get(IntType->getContext(), ResultVal);
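// Annotation (not in the original source): RawBytes now holds the loaded
// bytes in memory order; the two loops above just fold them back into an
// APInt starting from the most significant byte for the target's byte
// order, e.g. bytes {0x44,0x33,0x22,0x11} become 0x11223344 on
// little-endian.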
if (NBytes > UINT16_MAX)
  // ...

unsigned char *CurPtr = RawBytes.data();
// ...
if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
  // ...
if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
  return nullptr;
// ...
if (!Offset.isZero() || !Indices[0].isZero())
  return nullptr;
// ...
if (Index.isNegative() || Index.getActiveBits() >= 32)
  return nullptr;

C = C->getAggregateElement(Index.getZExtValue());
// ...

// Try an offset-independent fold of a load from a constant.
if (Offset.getSignificantBits() <= 64)
  if (Constant *Result =
          FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
    return Result;
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
  return nullptr;

C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
    /* ... */));
// ...
if (isa<PoisonValue>(C))
  return PoisonValue::get(Ty);
if (isa<UndefValue>(C))
  return UndefValue::get(Ty);
// If padding is needed when storing C to memory, it is not a uniform value.
if (!DL.typeSizeEqualsStoreSize(C->getType()))
  return nullptr;
// ...
if (C->isAllOnesValue() &&
    /* ... */)
if (Opc == Instruction::And) {
  KnownBits Known0 = computeKnownBits(Op0, DL);
  KnownBits Known1 = computeKnownBits(Op1, DL);
  if ((Known1.One | Known0.Zero).isAllOnes()) {
    // All the bits of Op0 that the 'and' could be masking are already zero.
    return Op0;
  }
  if ((Known0.One | Known1.Zero).isAllOnes()) {
    // All the bits of Op1 that the 'and' could be masking are already zero.
    return Op1;
  }
  // ...
}

if (Opc == Instruction::Sub) {
  // ...
  unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
  // ...
}
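// Annotation (not in the original source): the And folds read as "every bit
// position is either known one in the mask (so the other operand's bit
// passes through) or already known zero in the other operand". E.g. with
// Op1 == 0xFF00 and Op0's low byte known zero, Known1.One | Known0.Zero
// covers all 16 bits, so Op0 & Op1 == Op0 and the 'and' can be dropped.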
                                std::optional<ConstantRange> InRange,
                                /* ... */) {
Type *IntIdxTy = DL.getIndexType(ResultTy);
// ...
for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
  if ((i == 1 ||
       !isa<StructType>(GetElementPtrInst::getIndexedType(
           SrcElemTy, Ops.slice(1, i - 1)))) &&
      Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
    // ...
    Type *NewType =
        Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
    // ...
  }
}
Type *SrcElemTy = GEP->getSourceElementType();
// ...
if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
  return nullptr;

if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                 GEP->getInRange(), DL, TLI))
  return C;

Constant *Ptr = Ops[0];
if (!Ptr->getType()->isPointerTy())
  return nullptr;

Type *IntIdxTy = DL.getIndexType(Ptr->getType());

for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  if (!isa<ConstantInt>(Ops[i]))
    return nullptr;

unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
APInt Offset = APInt(
    BitWidth,
    DL.getIndexedOffsetInType(
        /* ... */));
std::optional<ConstantRange> InRange = GEP->getInRange();
bool Overflow = false;
while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
  NW &= GEP->getNoWrapFlags();
  // ...
  bool AllConstantInt = true;
  for (Value *NestedOp : NestedOps)
    if (!isa<ConstantInt>(NestedOp)) {
      AllConstantInt = false;
      break;
    }
  // ...
  Ptr = cast<Constant>(GEP->getOperand(0));
  SrcElemTy = GEP->getSourceElementType();
  // ...
}
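// Annotation (not in the original source): this loop strips chains of
// nested constant GEPs, e.g. gep(gep(@g, 3), 4), conjoining their no-wrap
// flags and accumulating every constant index into a single byte Offset so
// the chain can be re-emitted below as one canonical i8 GEP from the base
// pointer.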
// If the base value for this address is a literal integer value, fold the
// getelementptr to the resulting integer value casted to the pointer type.
if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
  if (CE->getOpcode() == Instruction::IntToPtr) {
    if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
      BasePtr = Base->getValue().zextOrTrunc(BitWidth);
  }
}

auto *PTy = cast<PointerType>(Ptr->getType());
if ((Ptr->isNullValue() || BasePtr != 0) &&
    !DL.isNonIntegralPointerType(PTy)) {
  // ...
}
// ...
bool CanBeNull, CanBeFreed;
uint64_t DerefBytes =
    Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
  // ...
// ...
return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), Ptr,
                                      ConstantInt::get(Ctx, Offset), NW,
                                      InRange);
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI,
                                       bool AllowNonDeterministic) {
  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals.
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I,
                                          AllowNonDeterministic);
      }
    }
    // ...
  }

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    // ...
    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->getNoWrapFlags(),
                                          GEP->getInRange());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI,
                                AllowNonDeterministic);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    // ...
  }
  }
if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
  return const_cast<Constant *>(C);

SmallVector<Constant *, 8> Ops;
for (const Use &OldU : C->operands()) {
  Constant *OldC = cast<Constant>(&OldU);
  Constant *NewC = OldC;
  // Recursively fold the ConstantExpr's operands. If we have already folded
  // a ConstantExpr, we don't have to process it again.
  if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
    auto It = FoldedOps.find(OldC);
    if (It == FoldedOps.end()) {
      NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
      FoldedOps.insert({OldC, NewC});
    } else {
      NewC = It->second;
    }
  }
  Ops.push_back(NewC);
}

if (auto *CE = dyn_cast<ConstantExpr>(C)) {
  if (Constant *Res = ConstantFoldInstOperandsImpl(
          CE, CE->getOpcode(), Ops, DL, TLI,
          /*AllowNonDeterministic=*/true))
    return Res;
  // ...
}

assert(isa<ConstantVector>(C));
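// Annotation (not in the original source): FoldedOps memoizes already-folded
// operands, so a DAG of constant expressions that shares subexpressions is
// folded once per distinct node rather than once per path, keeping the
// recursion linear in the number of distinct constants.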
if (auto *PN = dyn_cast<PHINode>(I)) {
  Constant *CommonValue = nullptr;
  // ...
  for (Value *Incoming : PN->incoming_values()) {
    // ...
    C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
    // If this incoming value disagrees with the one seen so far, give up.
    if (CommonValue && C != CommonValue)
      return nullptr;
    CommonValue = C;
  }
  // ...
}

// Scan the operand list, checking to see if they are all constants; if so,
// hand off to ConstantFoldInstOperandsImpl.
if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
  return nullptr;

SmallVector<Constant *, 8> Ops;
for (const Use &OpU : I->operands()) {
  auto *Op = cast<Constant>(&OpU);
  // Fold the Instruction's operands.
  Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
  Ops.push_back(Op);
}
// ...
return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
// ...
Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI,
                                         bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
}
if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
  // Convert the integer value to the right size to ensure we get the proper
  // extension or truncation.
  if (CE0->getOpcode() == Instruction::IntToPtr) {
    Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
    // ...
  }

  // Only do this transformation if the int is intptrty in size, otherwise
  // there is a truncation or extension that we aren't modeling.
  if (CE0->getOpcode() == Instruction::PtrToInt) {
    Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
    if (CE0->getType() == IntPtrTy) {
      // ...
    }
  }

  if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
    if (CE0->getOpcode() == CE1->getOpcode()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // ...
      }

      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy &&
            CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
          return ConstantFoldCompareInstOperands(
              Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
        }
      }
    }
  }
  // ...
  unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
  APInt Offset0(IndexWidth, 0);
  // ...
  APInt Offset1(IndexWidth, 0);
  // ...
  if (Stripped0 == Stripped1)
    // ...
} else if (isa<ConstantExpr>(Ops1)) {
  // If RHS is a constant expression but LHS is not, swap the operands.
  Predicate = ICmpInst::getSwappedPredicate(Predicate);
  // ...
}
if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
  if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
    return C;
// ...
if (!I || !I->getParent() || !I->getFunction())
  return Operand;

ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
// ...
return ConstantFP::get(
    /* ... */);
                                           bool AllowNonDeterministic) {
// ...
// If nsz or an algebraic FMF flag is set, the result of the FP operation may
// change due to future optimization, so don't fold unless non-deterministic
// results are allowed.
if (!AllowNonDeterministic)
  if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
    if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
        FP->hasAllowContract() || FP->hasAllowReciprocal())
      return nullptr;
// ...
// The precise NaN payload is non-deterministic.
if (!AllowNonDeterministic && C->isNaN())
  return nullptr;
case Instruction::PtrToInt:
  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    Constant *FoldedValue = nullptr;
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (CE->getOpcode() == Instruction::IntToPtr) {
      // zext/trunc the inttoptr to pointer size.
      FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
                                            DL.getIntPtrType(CE->getType()),
                                            /*IsSigned=*/false, DL);
    } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
      // For GEPs we can perform folds such as
      // (ptrtoint (gep null, x)) -> x.
      unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
      APInt BaseOffset(BitWidth, 0);
      auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
          DL, BaseOffset, /*AllowNonInbounds=*/true));
      if (Base->isNullValue()) {
        FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
      } else {
        // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
        if (GEP->getNumIndices() == 1 &&
            GEP->getSourceElementType()->isIntegerTy(8)) {
          auto *Ptr = cast<Constant>(GEP->getPointerOperand());
          auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
          Type *IntIdxTy = DL.getIndexType(Ptr->getType());
          if (Sub && Sub->getType() == IntIdxTy &&
              Sub->getOpcode() == Instruction::Sub &&
              Sub->getOperand(0)->isNullValue())
            // ...
        }
      }
    }
    // ...
  }
  break;
case Instruction::IntToPtr:
  // If the input is a ptrtoint, turn the pair into a ptr-to-ptr bitcast if
  // the int size is >= the ptr size and the address spaces match.
  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::PtrToInt) {
      Constant *SrcPtr = CE->getOperand(0);
      unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
      unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

      if (MidIntSize >= SrcPtrSize) {
        // ...
      }
    }
  }
  break;
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::AddrSpaceCast:
  break;
case Instruction::BitCast:
  // ...
}
// ...
Type *SrcTy = C->getType();
if (SrcTy == DestTy)
  return C;
if (Call->isNoBuiltin())
  return false;
if (Call->getFunctionType() != F->getFunctionType())
  return false;
switch (F->getIntrinsicID()) {
// Operations that do not operate on floating-point numbers and do not
// depend on rounding mode or errno.
case Intrinsic::bswap:
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::fshl:
case Intrinsic::fshr:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::masked_load:
case Intrinsic::get_active_lane_mask:
case Intrinsic::abs:
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:
case Intrinsic::scmp:
case Intrinsic::ucmp:
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::sadd_sat:
case Intrinsic::uadd_sat:
case Intrinsic::ssub_sat:
case Intrinsic::usub_sat:
case Intrinsic::smul_fix:
case Intrinsic::smul_fix_sat:
case Intrinsic::bitreverse:
case Intrinsic::is_constant:
case Intrinsic::vector_reduce_add:
case Intrinsic::vector_reduce_mul:
case Intrinsic::vector_reduce_and:
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_xor:
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_smax:
case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax:
// Target intrinsics.
case Intrinsic::amdgcn_perm:
case Intrinsic::amdgcn_wave_reduce_umin:
case Intrinsic::amdgcn_wave_reduce_umax:
case Intrinsic::amdgcn_s_wqm:
case Intrinsic::amdgcn_s_quadmask:
case Intrinsic::amdgcn_s_bitreplicate:
case Intrinsic::arm_mve_vctp8:
case Intrinsic::arm_mve_vctp16:
case Intrinsic::arm_mve_vctp32:
case Intrinsic::arm_mve_vctp64:
case Intrinsic::aarch64_sve_convert_from_svbool:
// WebAssembly float semantics are always known.
case Intrinsic::wasm_trunc_signed:
case Intrinsic::wasm_trunc_unsigned:
  return true;
// Floating point operations cannot be folded in strictfp functions in the
// general case. They can be folded if the FP environment is known.
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum:
case Intrinsic::log:
case Intrinsic::log2:
case Intrinsic::log10:
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::exp10:
case Intrinsic::sqrt:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::pow:
case Intrinsic::powi:
case Intrinsic::ldexp:
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::frexp:
case Intrinsic::fptoui_sat:
case Intrinsic::fptosi_sat:
case Intrinsic::convert_from_fp16:
case Intrinsic::convert_to_fp16:
case Intrinsic::amdgcn_cos:
case Intrinsic::amdgcn_cubeid:
case Intrinsic::amdgcn_cubema:
case Intrinsic::amdgcn_cubesc:
case Intrinsic::amdgcn_cubetc:
case Intrinsic::amdgcn_fmul_legacy:
case Intrinsic::amdgcn_fma_legacy:
case Intrinsic::amdgcn_fract:
case Intrinsic::amdgcn_sin:
// The intrinsics below depend on rounding mode in MXCSR.
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
case Intrinsic::x86_avx512_vcvtss2si32:
case Intrinsic::x86_avx512_vcvtss2si64:
case Intrinsic::x86_avx512_cvttss2si:
case Intrinsic::x86_avx512_cvttss2si64:
case Intrinsic::x86_avx512_vcvtsd2si32:
case Intrinsic::x86_avx512_vcvtsd2si64:
case Intrinsic::x86_avx512_cvttsd2si:
case Intrinsic::x86_avx512_cvttsd2si64:
case Intrinsic::x86_avx512_vcvtss2usi32:
case Intrinsic::x86_avx512_vcvtss2usi64:
case Intrinsic::x86_avx512_cvttss2usi:
case Intrinsic::x86_avx512_cvttss2usi64:
case Intrinsic::x86_avx512_vcvtsd2usi32:
case Intrinsic::x86_avx512_vcvtsd2usi64:
case Intrinsic::x86_avx512_cvttsd2usi:
case Intrinsic::x86_avx512_cvttsd2usi64:
  return !Call->isStrictFP();
// Sign operations are actually bitwise operations; they do not raise
// exceptions even for SNANs.
case Intrinsic::fabs:
case Intrinsic::copysign:
case Intrinsic::is_fpclass:
// Non-constrained variants of rounding operations mean the default FP
// environment; they can be folded in any case.
case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::trunc:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::canonicalize:
// Constrained intrinsics can be folded if the FP environment is known to
// the compiler.
case Intrinsic::experimental_constrained_fma:
case Intrinsic::experimental_constrained_fmuladd:
case Intrinsic::experimental_constrained_fadd:
case Intrinsic::experimental_constrained_fsub:
case Intrinsic::experimental_constrained_fmul:
case Intrinsic::experimental_constrained_fdiv:
case Intrinsic::experimental_constrained_frem:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_roundeven:
case Intrinsic::experimental_constrained_trunc:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_fcmp:
case Intrinsic::experimental_constrained_fcmps:
  return true;
if (!F->hasName() || Call->isStrictFP())
  return false;

StringRef Name = F->getName();
switch (Name[0]) {
default:
  return false;
case 'a':
  return Name == "acos" || Name == "acosf" ||
         Name == "asin" || Name == "asinf" ||
         Name == "atan" || Name == "atanf" ||
         Name == "atan2" || Name == "atan2f";
case 'c':
  return Name == "ceil" || Name == "ceilf" ||
         Name == "cos" || Name == "cosf" ||
         Name == "cosh" || Name == "coshf";
case 'e':
  return Name == "exp" || Name == "expf" ||
         Name == "exp2" || Name == "exp2f";
case 'f':
  return Name == "fabs" || Name == "fabsf" ||
         Name == "floor" || Name == "floorf" ||
         Name == "fmod" || Name == "fmodf";
case 'l':
  return Name == "log" || Name == "logf" || Name == "log2" ||
         Name == "log2f" || Name == "log10" || Name == "log10f" ||
         /* ... */;
case 'n':
  return Name == "nearbyint" || Name == "nearbyintf";
case 'p':
  return Name == "pow" || Name == "powf";
case 'r':
  return Name == "remainder" || Name == "remainderf" ||
         Name == "rint" || Name == "rintf" ||
         Name == "round" || Name == "roundf";
case 's':
  return Name == "sin" || Name == "sinf" ||
         Name == "sinh" || Name == "sinhf" ||
         /* ... */;
case 't':
  return Name == "tan" || Name == "tanf" ||
         Name == "tanh" || Name == "tanhf" ||
         Name == "trunc" || Name == "truncf";
}
// Check for the "__<name>_finite" variants produced when headers are
// preprocessed with __FINITE_MATH_ONLY__ enabled.
if (Name.size() < 12 || Name[1] != '_')
  return false;
switch (Name[2]) {
default:
  return false;
case 'a':
  return Name == "__acos_finite" || Name == "__acosf_finite" ||
         Name == "__asin_finite" || Name == "__asinf_finite" ||
         Name == "__atan2_finite" || Name == "__atan2f_finite";
case 'c':
  return Name == "__cosh_finite" || Name == "__coshf_finite";
case 'e':
  return Name == "__exp_finite" || Name == "__expf_finite" ||
         Name == "__exp2_finite" || Name == "__exp2f_finite";
case 'l':
  return Name == "__log_finite" || Name == "__logf_finite" ||
         Name == "__log10_finite" || Name == "__log10f_finite";
case 'p':
  return Name == "__pow_finite" || Name == "__powf_finite";
case 's':
  return Name == "__sinh_finite" || Name == "__sinhf_finite";
}
APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
return ConstantFP::get(Ty->getContext(), APF);
// ...

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
  // ...
  return ConstantFP::get(Ty, V);
}
#endif

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue(Result, Ty);
}

#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
                            Type *Ty) {
  llvm_fenv_clearexcept();
  float128 Result = NativeFP(V.convertToQuad());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue128(Result, Ty);
}
#endif

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }
  return GetConstantFoldFPValue(Result, Ty);
}
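// Annotation (not in the original source): these helpers evaluate the host
// libm function and accept the result only if no ERANGE/EDOM errno or FP
// exception other than inexact was raised. For example, ConstantFoldFP(log,
// ...) refuses to fold log(-1.0), which raises a domain error.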
if (isa<ConstantAggregateZero>(Op))
  return ConstantInt::get(VT->getElementType(), 0);

// Poison propagates, just as it does for the underlying binops.
if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
  return PoisonValue::get(VT->getElementType());

if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
  return nullptr;

auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
if (!EltC)
  return nullptr;

APInt Acc = EltC->getValue();
for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
  if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
    return nullptr;
  const APInt &X = EltC->getValue();
  switch (IID) {
  case Intrinsic::vector_reduce_add:  Acc = Acc + X; break;
  case Intrinsic::vector_reduce_mul:  Acc = Acc * X; break;
  case Intrinsic::vector_reduce_and:  Acc = Acc & X; break;
  case Intrinsic::vector_reduce_or:   Acc = Acc | X; break;
  case Intrinsic::vector_reduce_xor:  Acc = Acc ^ X; break;
  case Intrinsic::vector_reduce_smin: Acc = APIntOps::smin(Acc, X); break;
  case Intrinsic::vector_reduce_smax: Acc = APIntOps::smax(Acc, X); break;
  case Intrinsic::vector_reduce_umin: Acc = APIntOps::umin(Acc, X); break;
  case Intrinsic::vector_reduce_umax: Acc = APIntOps::umax(Acc, X); break;
  }
}

return ConstantInt::get(Op->getContext(), Acc);
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr; // Can't constant fold this.
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
Type *Ty = Op->getType();
// ...
return Op->getValueAPF().convertToDouble();
// ...
APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
// ...
if (auto *CI = dyn_cast<ConstantInt>(Op)) {
  C = &CI->getValue();
  return true;
}
if (isa<UndefValue>(Op)) {
  C = nullptr;
  return true;
}
// ...

// If the operation raised no exception status flags, it is safe to fold.
if (St == APFloat::opStatus::opOK)
  return true;
// If evaluation raised an FP exception, the result can depend on the
// rounding mode; if the latter is dynamic, folding is not possible.
if (ORM && *ORM == RoundingMode::Dynamic)
  return false;
// If FP exceptions are ignored, fold the call even if one was raised.
if (EB && *EB != fp::ExceptionBehavior::ebStrict)
  return true;
// ...

if (!ORM || *ORM == RoundingMode::Dynamic)
  // Even with an unknown rounding mode, try to evaluate: if no inexact
  // exception is raised, the result does not depend on rounding.
  return RoundingMode::NearestTiesToEven;
// ...
return ConstantFP::get(
    /* ... */);
// ...
if (Src.isNormal() || Src.isInfinity())
  return ConstantFP::get(CI->getContext(), Src);
// ...
return ConstantFP::get(CI->getContext(), Src);
if (IntrinsicID == Intrinsic::is_constant) {
  // We know we have a "Constant" argument, but we want to return true only
  // for manifest constants, not ones that depend on constants with
  // unknowable values, e.g. GlobalValue or BlockAddress.
  if (Operands[0]->isManifestConstant())
    return ConstantInt::getTrue(Ty->getContext());
  return nullptr;
}

if (isa<PoisonValue>(Operands[0])) {
  // ...
  if (IntrinsicID == Intrinsic::canonicalize)
    return PoisonValue::get(Ty);
}

if (isa<UndefValue>(Operands[0])) {
  if (IntrinsicID == Intrinsic::cos ||
      IntrinsicID == Intrinsic::ctpop ||
      IntrinsicID == Intrinsic::fptoui_sat ||
      IntrinsicID == Intrinsic::fptosi_sat ||
      IntrinsicID == Intrinsic::canonicalize)
    return Constant::getNullValue(Ty);
  if (IntrinsicID == Intrinsic::bswap ||
      IntrinsicID == Intrinsic::bitreverse ||
      IntrinsicID == Intrinsic::launder_invariant_group ||
      IntrinsicID == Intrinsic::strip_invariant_group)
    return Operands[0];
}

if (isa<ConstantPointerNull>(Operands[0])) {
  // launder(null) == null == strip(null) iff in addrspace 0.
  if (IntrinsicID == Intrinsic::launder_invariant_group ||
      IntrinsicID == Intrinsic::strip_invariant_group) {
    // The instruction may not yet be in a basic block (e.g. during
    // inlining), so check the parent before querying the caller.
    const Function *Caller =
        Call->getParent() ? Call->getCaller() : nullptr;
    // ...
  }
}

if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
  if (IntrinsicID == Intrinsic::convert_to_fp16) {
    APFloat Val(Op->getValueAPF());

    bool lost = false;
    Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

    return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
  }

  APFloat U = Op->getValueAPF();

  if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
      IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
    bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
    // ...
    bool IsExact = false;
    APFloat::opStatus Status =
        U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

    if (Status == APFloat::opOK || Status == APFloat::opInexact)
      return ConstantInt::get(Ty, Int);
    // ...
  }

  if (IntrinsicID == Intrinsic::fptoui_sat ||
      IntrinsicID == Intrinsic::fptosi_sat) {
    // convertToInteger() already has the required saturation semantics.
    APSInt Int(Ty->getIntegerBitWidth(),
               IntrinsicID == Intrinsic::fptoui_sat);
    bool IsExact;
    U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
    return ConstantInt::get(Ty, Int);
  }

  if (IntrinsicID == Intrinsic::canonicalize)
    return constantFoldCanonicalize(Ty, Call, U);
#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
  // ...
  if (IntrinsicID == Intrinsic::log) {
    float128 Result = logf128(Op->getValueAPF().convertToQuad());
    return GetConstantFoldFPValue128(Result, Ty);
  }

  LibFunc Fp128Func = NotLibFunc;
  if (TLI && TLI->getLibFunc(Name, Fp128Func) && TLI->has(Fp128Func) &&
      Fp128Func == LibFunc_logl)
    return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
#endif
if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
  U.roundToIntegral(APFloat::rmNearestTiesToEven);
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::round) {
  U.roundToIntegral(APFloat::rmNearestTiesToAway);
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::roundeven) {
  U.roundToIntegral(APFloat::rmNearestTiesToEven);
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::ceil) {
  U.roundToIntegral(APFloat::rmTowardPositive);
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::floor) {
  U.roundToIntegral(APFloat::rmTowardNegative);
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::trunc) {
  U.roundToIntegral(APFloat::rmTowardZero);
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::fabs) {
  U.clearSign();
  return ConstantFP::get(Ty->getContext(), U);
}
if (IntrinsicID == Intrinsic::amdgcn_fract) {
  // fract(x) is fmin(x - floor(x), 0x1.fffffep-1f); the min keeps fract of
  // small negative values from rounding up to 1.0.
  APFloat FloorU(U);
  FloorU.roundToIntegral(APFloat::rmTowardNegative);
  APFloat FractU(U - FloorU);
  APFloat AlmostOne(U.getSemantics(), 1);
  AlmostOne.next(/*nextDown*/ true);
  return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
}
std::optional<APFloat::roundingMode> RM;
switch (IntrinsicID) {
default:
  break;
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_rint: {
  auto CI = cast<ConstrainedFPIntrinsic>(Call);
  RM = CI->getRoundingMode();
  if (!RM || *RM == RoundingMode::Dynamic)
    return nullptr;
  break;
}
case Intrinsic::experimental_constrained_round:
  RM = APFloat::rmNearestTiesToAway;
  break;
case Intrinsic::experimental_constrained_ceil:
  RM = APFloat::rmTowardPositive;
  break;
case Intrinsic::experimental_constrained_floor:
  RM = APFloat::rmTowardNegative;
  break;
case Intrinsic::experimental_constrained_trunc:
  RM = APFloat::rmTowardZero;
  break;
}
if (RM) {
  auto CI = cast<ConstrainedFPIntrinsic>(Call);
  if (U.isFinite()) {
    APFloat::opStatus St = U.roundToIntegral(*RM);
    if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
        St == APFloat::opInexact) {
      std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
      // ...
    }
  } else if (U.isSignaling()) {
    std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
    // ...
  }
}
switch (IntrinsicID) {
default:
  break;
case Intrinsic::log:
  return ConstantFoldFP(log, APF, Ty);
case Intrinsic::log2:
  return ConstantFoldFP(log2, APF, Ty);
case Intrinsic::log10:
  return ConstantFoldFP(log10, APF, Ty);
case Intrinsic::exp:
  return ConstantFoldFP(exp, APF, Ty);
case Intrinsic::exp2:
  // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
  return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
case Intrinsic::exp10:
  // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
  return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
case Intrinsic::sin:
  return ConstantFoldFP(sin, APF, Ty);
case Intrinsic::cos:
  return ConstantFoldFP(cos, APF, Ty);
case Intrinsic::sqrt:
  return ConstantFoldFP(sqrt, APF, Ty);
case Intrinsic::amdgcn_cos:
case Intrinsic::amdgcn_sin: {
  double V = getValueAsDouble(Op);
  if (V < -256.0 || V > 256.0)
    // The gfx8 and gfx9 architectures handle arguments outside the range
    // [-256, 256] differently; this is a rare case, so bail out.
    return nullptr;
  bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
  double V4 = V * 4.0;
  if (V4 == floor(V4)) {
    // Force exact results for quarter-multiples of 1/(2*pi).
    const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
    V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
  } else {
    // ...
  }
  return GetConstantFoldFPValue(V, Ty);
}
}
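// Annotation (not in the original source): amdgcn sin/cos take revolutions
// rather than radians, i.e. the argument is multiplied by 2*pi. Whole
// quarter turns (V4 == floor(V4)) therefore hit exact values from
// {0, 1, 0, -1}, with cos read from the same table shifted by one quarter.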
switch (Func) {
default:
  break;
case LibFunc_acos: case LibFunc_acosf:
case LibFunc_acos_finite: case LibFunc_acosf_finite:
  if (TLI->has(Func))
    return ConstantFoldFP(acos, APF, Ty);
  break;
case LibFunc_asin: case LibFunc_asinf:
case LibFunc_asin_finite: case LibFunc_asinf_finite:
  if (TLI->has(Func))
    return ConstantFoldFP(asin, APF, Ty);
  break;
case LibFunc_atan: case LibFunc_atanf:
  if (TLI->has(Func))
    return ConstantFoldFP(atan, APF, Ty);
  break;
case LibFunc_ceil: case LibFunc_ceilf:
  if (TLI->has(Func)) {
    U.roundToIntegral(APFloat::rmTowardPositive);
    return ConstantFP::get(Ty->getContext(), U);
  }
  break;
case LibFunc_cos: case LibFunc_cosf:
  if (TLI->has(Func))
    return ConstantFoldFP(cos, APF, Ty);
  break;
case LibFunc_cosh: case LibFunc_coshf:
case LibFunc_cosh_finite: case LibFunc_coshf_finite:
  if (TLI->has(Func))
    return ConstantFoldFP(cosh, APF, Ty);
  break;
case LibFunc_exp: case LibFunc_expf:
case LibFunc_exp_finite: case LibFunc_expf_finite:
  if (TLI->has(Func))
    return ConstantFoldFP(exp, APF, Ty);
  break;
case LibFunc_exp2: case LibFunc_exp2f:
case LibFunc_exp2_finite: case LibFunc_exp2f_finite:
  if (TLI->has(Func))
    return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
  break;
case LibFunc_fabs: case LibFunc_fabsf:
  if (TLI->has(Func)) {
    U.clearSign();
    return ConstantFP::get(Ty->getContext(), U);
  }
  break;
case LibFunc_floor: case LibFunc_floorf:
  if (TLI->has(Func)) {
    U.roundToIntegral(APFloat::rmTowardNegative);
    return ConstantFP::get(Ty->getContext(), U);
  }
  break;
case LibFunc_log: case LibFunc_logf:
case LibFunc_log_finite: case LibFunc_logf_finite:
  if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
    return ConstantFoldFP(log, APF, Ty);
  break;
case LibFunc_log2: case LibFunc_log2f:
case LibFunc_log2_finite: case LibFunc_log2f_finite:
  if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
    return ConstantFoldFP(log2, APF, Ty);
  break;
case LibFunc_log10: case LibFunc_log10f:
case LibFunc_log10_finite: case LibFunc_log10f_finite:
  if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
    return ConstantFoldFP(log10, APF, Ty);
  break;
case LibFunc_nearbyint: case LibFunc_nearbyintf:
case LibFunc_rint: case LibFunc_rintf:
  if (TLI->has(Func)) {
    U.roundToIntegral(APFloat::rmNearestTiesToEven);
    return ConstantFP::get(Ty->getContext(), U);
  }
  break;
case LibFunc_round: case LibFunc_roundf:
  if (TLI->has(Func)) {
    U.roundToIntegral(APFloat::rmNearestTiesToAway);
    return ConstantFP::get(Ty->getContext(), U);
  }
  break;
case LibFunc_sin: case LibFunc_sinf:
  if (TLI->has(Func))
    return ConstantFoldFP(sin, APF, Ty);
  break;
case LibFunc_sinh: case LibFunc_sinhf:
case LibFunc_sinh_finite: case LibFunc_sinhf_finite:
  if (TLI->has(Func))
    return ConstantFoldFP(sinh, APF, Ty);
  break;
case LibFunc_sqrt: case LibFunc_sqrtf:
  if (!APF.isNegative() && TLI->has(Func))
    return ConstantFoldFP(sqrt, APF, Ty);
  break;
case LibFunc_tan: case LibFunc_tanf:
  if (TLI->has(Func))
    return ConstantFoldFP(tan, APF, Ty);
  break;
case LibFunc_tanh: case LibFunc_tanhf:
  if (TLI->has(Func))
    return ConstantFoldFP(tanh, APF, Ty);
  break;
case LibFunc_trunc: case LibFunc_truncf:
  if (TLI->has(Func)) {
    U.roundToIntegral(APFloat::rmTowardZero);
    return ConstantFP::get(Ty->getContext(), U);
  }
  break;
}
if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
  switch (IntrinsicID) {
  case Intrinsic::bswap:
    return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
  case Intrinsic::ctpop:
    return ConstantInt::get(Ty, Op->getValue().popcount());
  case Intrinsic::bitreverse:
    return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
  case Intrinsic::convert_from_fp16: {
    APFloat Val(APFloat::IEEEhalf(), Op->getValue());

    bool lost = false;
    APFloat::opStatus status = Val.convert(
        Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

    // Conversion is always precise.
    (void)status;
    assert(status != APFloat::opInexact && !lost &&
           "Precision lost during fp16 constfolding");

    return ConstantFP::get(Ty->getContext(), Val);
  }

  case Intrinsic::amdgcn_s_wqm: {
    uint64_t Val = Op->getZExtValue();
    Val |= (Val & 0x5555555555555555ULL) << 1 |
           ((Val >> 1) & 0x5555555555555555ULL);
    Val |= (Val & 0x3333333333333333ULL) << 2 |
           ((Val >> 2) & 0x3333333333333333ULL);
    return ConstantInt::get(Ty, Val);
  }

  case Intrinsic::amdgcn_s_quadmask: {
    uint64_t Val = Op->getZExtValue();
    uint64_t QuadMask = 0;
    for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
      if (!(Val & 0xF))
        continue;

      QuadMask |= (1ULL << I);
    }
    return ConstantInt::get(Ty, QuadMask);
  }

  case Intrinsic::amdgcn_s_bitreplicate: {
    uint64_t Val = Op->getZExtValue();
    Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
    Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
    Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
    Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
    Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
    Val = Val | Val << 1;
    return ConstantInt::get(Ty, Val);
  }
  }
}
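// Annotation (not in the original source): s_bitreplicate doubles each of
// the low 32 bits. The shift/mask ladder spreads the bits apart in steps of
// 16, 8, 4, 2 and 1, then the final `Val | Val << 1` duplicates each bit,
// so 0b101 becomes 0b110011.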
switch (IntrinsicID) {
default:
  break;
case Intrinsic::vector_reduce_add:
case Intrinsic::vector_reduce_mul:
case Intrinsic::vector_reduce_and:
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_xor:
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_smax:
case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax:
  // ...
  break;
}

// Support ConstantVector in case we have an Undef in the top.
if (isa<ConstantVector>(Operands[0]) ||
    isa<ConstantDataVector>(Operands[0])) {
  auto *Op = cast<Constant>(Operands[0]);
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
    if (ConstantFP *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                         /*roundTowardZero=*/false, Ty,
                                         /*IsSigned*/ true);
    break;
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    if (ConstantFP *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                         /*roundTowardZero=*/true, Ty,
                                         /*IsSigned*/ true);
    break;
  }
}
// ...

auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
// ...
if (FCmp->isSignaling()) {
  if (Op1.isNaN() || Op2.isNaN())
    St = APFloat::opInvalidOp;
} else {
  if (Op1.isSignaling() || Op2.isSignaling())
    St = APFloat::opInvalidOp;
}
// ...
return ConstantInt::get(Call->getType()->getScalarType(), Result);
const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
if (!Op1)
  return nullptr;

const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
if (!Op2)
  return nullptr;

const APFloat &Op1V = Op1->getValueAPF();
const APFloat &Op2V = Op2->getValueAPF();
// ...
switch (Func) {
default:
  break;
case LibFunc_pow: case LibFunc_powf:
case LibFunc_pow_finite: case LibFunc_powf_finite:
  if (TLI->has(Func))
    return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
  break;
case LibFunc_fmod: case LibFunc_fmodf:
  if (TLI->has(Func)) {
    APFloat V = Op1->getValueAPF();
    if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
      return ConstantFP::get(Ty->getContext(), V);
  }
  break;
case LibFunc_remainder: case LibFunc_remainderf:
  if (TLI->has(Func)) {
    APFloat V = Op1->getValueAPF();
    if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
      return ConstantFP::get(Ty->getContext(), V);
  }
  break;
case LibFunc_atan2: case LibFunc_atan2f:
case LibFunc_atan2_finite: case LibFunc_atan2f_finite:
  if (TLI->has(Func))
    return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
  break;
}
// ...

bool IsOp0Undef = isa<UndefValue>(Operands[0]);
bool IsOp1Undef = isa<UndefValue>(Operands[1]);
switch (IntrinsicID) {
case Intrinsic::maxnum:
case Intrinsic::minnum:
case Intrinsic::maximum:
case Intrinsic::minimum:
  // If one argument is undef, return the other argument.
  if (IsOp0Undef)
    return Operands[1];
  if (IsOp1Undef)
    return Operands[0];
  break;
}
if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
  const APFloat &Op1V = Op1->getValueAPF();

  if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
    if (Op2->getType() != Op1->getType())
      return nullptr;
    const APFloat &Op2V = Op2->getValueAPF();

    if (const auto *ConstrIntr =
            dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
      RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
      APFloat Res = Op1V;
      APFloat::opStatus St;
      switch (IntrinsicID) {
      default:
        return nullptr;
      case Intrinsic::experimental_constrained_fadd:
        St = Res.add(Op2V, RM);
        break;
      case Intrinsic::experimental_constrained_fsub:
        St = Res.subtract(Op2V, RM);
        break;
      case Intrinsic::experimental_constrained_fmul:
        St = Res.multiply(Op2V, RM);
        break;
      case Intrinsic::experimental_constrained_fdiv:
        St = Res.divide(Op2V, RM);
        break;
      case Intrinsic::experimental_constrained_frem:
        St = Res.mod(Op2V);
        break;
      case Intrinsic::experimental_constrained_fcmp:
      case Intrinsic::experimental_constrained_fcmps:
        return evaluateCompare(Op1V, Op2V, ConstrIntr);
      }
      // ...
      return ConstantFP::get(Ty->getContext(), Res);
    }
    // ...
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::copysign:
      return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
    case Intrinsic::minnum:
      return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
    case Intrinsic::maxnum:
      return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
    case Intrinsic::minimum:
      return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
    case Intrinsic::maximum:
      return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
    }
    // ...
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::pow:
      return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
    case Intrinsic::amdgcn_fmul_legacy:
      // The legacy behaviour is that multiplying +/- 0.0 by anything, even
      // NaN or infinity, gives +0.0.
      if (Op1V.isZero() || Op2V.isZero())
        return ConstantFP::getZero(Ty);
      return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
    }
  } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
    switch (IntrinsicID) {
    case Intrinsic::ldexp: {
      return ConstantFP::get(
          Ty->getContext(),
          scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
    }
    case Intrinsic::is_fpclass: {
      // ...
      return ConstantInt::get(Ty, Result);
    }
    case Intrinsic::powi: {
      int Exp = static_cast<int>(Op2C->getSExtValue());
      // ...
      Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
                  /* ... */);
      // ...
      return ConstantFP::get(Ty->getContext(), Res);
    }
    }
  }
}
const APInt *C0, *C1;
if (!getConstIntOrUndef(Operands[0], C0) ||
    !getConstIntOrUndef(Operands[1], C1))
  return nullptr;

switch (IntrinsicID) {
default:
  break;
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:
  // ...
  return ConstantInt::get(
      Ty, ICmpInst::compare(*C0, *C1,
                            MinMaxIntrinsic::getPredicate(IntrinsicID))
              ? *C0
              : *C1);

case Intrinsic::scmp:
case Intrinsic::ucmp:
  // ...
  if (!C0 || !C1)
    return ConstantInt::get(Ty, 0);

  int Res;
  if (IntrinsicID == Intrinsic::scmp)
    Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
  else
    Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
  return ConstantInt::get(Ty, Res, /*IsSigned=*/true);

case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
  // X - undef -> { 0, false }
  // undef - X -> { 0, false }
  if (!C0 || !C1)
    return Constant::getNullValue(Ty);
  [[fallthrough]];
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
  // X + undef -> { -1, false }
  // undef + x -> { -1, false }
  if (!C0 || !C1) {
    return ConstantStruct::get(
        cast<StructType>(Ty),
        {Constant::getAllOnesValue(Ty->getStructElementType(0)),
         Constant::getNullValue(Ty->getStructElementType(1))});
  }
  [[fallthrough]];
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
  // undef * X -> { 0, false }
  // X * undef -> { 0, false }
  if (!C0 || !C1)
    return Constant::getNullValue(Ty);

  APInt Res;
  bool Overflow;
  switch (IntrinsicID) {
  default:
    llvm_unreachable("Invalid case");
  case Intrinsic::sadd_with_overflow:
    Res = C0->sadd_ov(*C1, Overflow);
    break;
  case Intrinsic::uadd_with_overflow:
    Res = C0->uadd_ov(*C1, Overflow);
    break;
  case Intrinsic::ssub_with_overflow:
    Res = C0->ssub_ov(*C1, Overflow);
    break;
  case Intrinsic::usub_with_overflow:
    Res = C0->usub_ov(*C1, Overflow);
    break;
  case Intrinsic::smul_with_overflow:
    Res = C0->smul_ov(*C1, Overflow);
    break;
  case Intrinsic::umul_with_overflow:
    Res = C0->umul_ov(*C1, Overflow);
    break;
  }
  // ...
}
case Intrinsic::uadd_sat:
case Intrinsic::sadd_sat:
  // ...
  if (IntrinsicID == Intrinsic::uadd_sat)
    return ConstantInt::get(Ty, C0->uadd_sat(*C1));
  else
    return ConstantInt::get(Ty, C0->sadd_sat(*C1));
case Intrinsic::usub_sat:
case Intrinsic::ssub_sat:
  // ...
  if (IntrinsicID == Intrinsic::usub_sat)
    return ConstantInt::get(Ty, C0->usub_sat(*C1));
  else
    return ConstantInt::get(Ty, C0->ssub_sat(*C1));
case Intrinsic::cttz:
case Intrinsic::ctlz:
  assert(C1 && "Must be constant int");
  // cttz(0, 1) and ctlz(0, 1) are poison.
  // ...
  if (IntrinsicID == Intrinsic::cttz)
    return ConstantInt::get(Ty, C0->countr_zero());
  else
    return ConstantInt::get(Ty, C0->countl_zero());
case Intrinsic::abs:
  assert(C1 && "Must be constant int");
  // ...
  return ConstantInt::get(Ty, C0->abs());
case Intrinsic::amdgcn_wave_reduce_umin:
case Intrinsic::amdgcn_wave_reduce_umax:
  return dyn_cast<Constant>(Operands[0]);
}
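// Annotation (not in the original source): APInt has no three-way compare,
// so scmp/ucmp are folded with two comparisons. E.g. scmp(i32 -1, i32 1)
// gives -1 because (-1 sgt 1) is false and (-1 slt 1) is true; the result
// is then materialized as a signed constant.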
if ((isa<ConstantVector>(Operands[0]) ||
     isa<ConstantDataVector>(Operands[0])) &&
    // Check for default rounding mode.
    isa<ConstantInt>(Operands[1]) &&
    cast<ConstantInt>(Operands[1])->getValue() == 4) {
  auto *Op = cast<Constant>(Operands[0]);
  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
    if (ConstantFP *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                         /*roundTowardZero=*/false, Ty,
                                         /*IsSigned*/ true);
    break;
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
    if (ConstantFP *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                         /*roundTowardZero=*/false, Ty,
                                         /*IsSigned*/ false);
    break;
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
    if (ConstantFP *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                         /*roundTowardZero=*/true, Ty,
                                         /*IsSigned*/ true);
    break;
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    if (ConstantFP *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
      return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                         /*roundTowardZero=*/true, Ty,
                                         /*IsSigned*/ false);
    break;
  }
}
if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
  // ...
}
// ...
switch (IntrinsicID) {
default:
  llvm_unreachable("Unexpected intrinsic");
case Intrinsic::amdgcn_cubeid:
  // ...
case Intrinsic::amdgcn_cubema:
  // ...
case Intrinsic::amdgcn_cubesc:
  // ...
case Intrinsic::amdgcn_cubetc:
  // ...
}

const APInt *C0, *C1, *C2;
if (!getConstIntOrUndef(Operands[0], C0) ||
    !getConstIntOrUndef(Operands[1], C1) ||
    !getConstIntOrUndef(Operands[2], C2))
  return nullptr;
// ...
APInt Val(32, 0);
unsigned NumUndefBytes = 0;
for (unsigned I = 0; I < 32; I += 8) {
  unsigned Sel = C2->extractBitsAsZExtValue(8, I);
  unsigned B = 0;

  if (Sel >= 13)
    B = 0xff;
  else if (Sel == 12)
    B = 0x00;
  else {
    const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
    if (!Src)
      ++NumUndefBytes;
    else if (Sel < 8)
      B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
    else
      B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
  }

  Val.insertBits(B, I, 8);
}

if (NumUndefBytes == 4)
  return UndefValue::get(Ty);

return ConstantInt::get(Ty, Val);
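// Annotation (not in the original source): v_perm_b32 builds each result
// byte from an 8-bit selector: values 0-3 pick a byte of the second source,
// 4-7 a byte of the first, 8-11 broadcast a source sign bit to 0x00/0xff,
// 12 forces 0x00 and 13-15 force 0xff.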
if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
  if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
    if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
      const APFloat &C1 = Op1->getValueAPF();
      const APFloat &C2 = Op2->getValueAPF();
      const APFloat &C3 = Op3->getValueAPF();

      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
        APFloat Res = C1;
        APFloat::opStatus St;
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fma:
        case Intrinsic::experimental_constrained_fmuladd:
          St = Res.fusedMultiplyAdd(C2, C3, RM);
          break;
        }
        if (mayFoldConstrained(
                const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
          return ConstantFP::get(Ty->getContext(), Res);
        return nullptr;
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::amdgcn_fma_legacy: {
        // The legacy behaviour is that multiplying +/- 0.0 by anything,
        // even NaN or infinity, gives +0.0.
        // ...
      }
      case Intrinsic::fma:
      case Intrinsic::fmuladd: {
        APFloat V = C1;
        V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }
      case Intrinsic::amdgcn_cubeid:
      case Intrinsic::amdgcn_cubema:
      case Intrinsic::amdgcn_cubesc:
      case Intrinsic::amdgcn_cubetc: {
        APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
        // ...
      }
      }
    }
  }
}

if (IntrinsicID == Intrinsic::smul_fix ||
    IntrinsicID == Intrinsic::smul_fix_sat) {
  const APInt *C0, *C1;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1))
    return nullptr;
  // ...
  // Rounds towards negative infinity when the result cannot be represented
  // exactly for the given scale, matching
  // DAGTypeLegalizer::ExpandIntRes_MULFIX.
  unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
  unsigned Width = C0->getBitWidth();
  assert(Scale < Width && "Illegal scale.");
  unsigned ExtendedWidth = Width * 2;
  APInt Product =
      (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
  if (IntrinsicID == Intrinsic::smul_fix_sat) {
    // ...
  }
  // ...
}
if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;

  bool IsRight = IntrinsicID == Intrinsic::fshr;
  // ...
  // The shift amount is interpreted modulo the bitwidth. If it is
  // effectively zero, avoid UB from the oversized inverse shift below.
  unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
  unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
  if (!C0)
    return ConstantInt::get(Ty, C1->lshr(LshrAmt));
  if (!C1)
    return ConstantInt::get(Ty, C0->shl(ShlAmt));
  return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
}
if (IntrinsicID == Intrinsic::amdgcn_perm)
  return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

return nullptr;
if (Operands.size() == 1)
  return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

if (Operands.size() == 2) {
  if (Constant *FoldedLibCall =
          ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
    return FoldedLibCall;
  }
  return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
}

if (Operands.size() == 3)
  return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

return nullptr;
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];
    // ...
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      // ...
      if (isa<UndefValue>(MaskElt)) {
        // ...
      }
      if (MaskElt->isNullValue()) {
        // Masked-off lane: take the passthru element.
        // ...
      } else if (MaskElt->isOneValue()) {
        // Active lane: take the loaded element.
        // ...
      }
    }
    // ...
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++)
        NCs.push_back(ConstantInt::getBool(Ty, i < Limit));
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++)
        NCs.push_back(ConstantInt::getBool(Ty, Base + i < Limit));
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // ...
      Lane[J] = Operands[J]->getAggregateElement(I);
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
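// Annotation (not in the original source): for intrinsics without special
// handling above, the vector fold is element-wise. Lane I of every operand
// is gathered into a column, the scalar folder runs on that column, and the
// whole fold is abandoned if any single lane fails to fold.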
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    /* ... */) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;
    // ...
  }
  default:
    break;
  }
  return nullptr;
}

static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);

  // The exponent is an unspecified value for inf/nan; use zero rather than
  // undef.
  // ...
  return {Result0, Result1};
}
switch (IntrinsicID) {
case Intrinsic::frexp: {
  // ...
  if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
    SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
    SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());

    for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
      Constant *Lane = Operands[0]->getAggregateElement(I);
      std::tie(Results0[I], Results1[I]) =
          ConstantFoldScalarFrexpCall(Lane, Ty1);
      if (!Results0[I])
        return nullptr;
    }
    // ...
  }

  auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
  if (!Result0)
    return nullptr;
  // ...
}
default:
  return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
}
// ...
return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
                                  dyn_cast_if_present<CallBase>(FMFSource));
                                 bool AllowNonDeterministic) {
if (Call->isNoBuiltin())
  return nullptr;
// ...
Type *Ty = F->getReturnType();
// ...
if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
  return ConstantFoldFixedVectorCall(
      Name, IID, FVTy, Operands, F->getDataLayout(), TLI, Call);

if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
  return ConstantFoldScalableVectorCall(
      Name, IID, SVTy, Operands, F->getDataLayout(), TLI, Call);

if (auto *StTy = dyn_cast<StructType>(Ty))
  return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                F->getDataLayout(), TLI, Call);
// ...
return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
if (Call->isNoBuiltin() || Call->isStrictFP())
  return false;
Function *F = Call->getCalledFunction();
if (!F)
  return false;
// ...
if (Call->arg_size() == 1) {
  if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
    const APFloat &Op = OpC->getValueAPF();
    switch (Func) {
    // ...
    case LibFunc_log10l: case LibFunc_log10:
    case LibFunc_log10f:
      return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
    case LibFunc_expl: case LibFunc_exp: case LibFunc_expf:
      // FIXME: These boundaries are slightly conservative.
      if (OpC->getType()->isDoubleTy())
        return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
      if (OpC->getType()->isFloatTy())
        return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
      break;
    // ...
    case LibFunc_sinl: case LibFunc_sin: case LibFunc_sinf:
    case LibFunc_cosl: case LibFunc_cos: case LibFunc_cosf:
      return !Op.isInfinity();
    case LibFunc_tanl: case LibFunc_tan:
    case LibFunc_tanf: {
      // ...
      Type *Ty = OpC->getType();
      if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
        return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
      break;
    }
    // ...
    case LibFunc_sqrtl: case LibFunc_sqrt: case LibFunc_sqrtf:
      return Op.isNaN() || Op.isZero() || !Op.isNegative();
    default:
      break;
    }
  }
}

if (Call->arg_size() == 2) {
  ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
  ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
  if (Op0C && Op1C) {
    const APFloat &Op0 = Op0C->getValueAPF();
    const APFloat &Op1 = Op1C->getValueAPF();
    switch (Func) {
    case LibFunc_powl: case LibFunc_pow:
    case LibFunc_powf: {
      Type *Ty = Op0C->getType();
      if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
        if (Ty == Op1C->getType())
          return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
      break;
    }
    case LibFunc_fmodl: case LibFunc_fmod: case LibFunc_fmodf:
    case LibFunc_remainderl: case LibFunc_remainder:
    case LibFunc_remainderf:
      return Op0.isNaN() || Op1.isNaN() ||
             (!Op0.isInfinity() && !Op1.isZero());
    case LibFunc_atan2: case LibFunc_atan2f:
    case LibFunc_atan2l:
      // ...
      return true;
    default:
      break;
    }
  }
}

return false;
}

void TargetFolder::anchor() {}
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
amode Optimize addressing mode
mir Rename Register Operands
static bool InRange(int64_t Value, unsigned short Shift, int LBound, int HBound)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
bool isPosInfinity() const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
float convertToFloat() const
Converts this APFloat to host float value.
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
bool isNegInfinity() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool slt(const APInt &RHS) const
Signed less than comparison.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static bool isDesirableCastOp(unsigned Opcode)
Whether creating a constant expression for this cast is desirable.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
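The ConstantExpr factory methods above build constant expressions, folding where possible. A minimal sketch of the getelementptr form, assuming an opaque-pointer build of LLVM (15+); the function name is illustrative:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Build `getelementptr i8, ptr null, i64 4` as a constant expression.
static Constant *gepSketch(LLVMContext &Ctx) {
  Constant *Base = ConstantPointerNull::get(PointerType::get(Ctx, 0));
  Constant *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 4);
  return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), Base, Idx);
}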
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
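getAggregateElement and isNullValue are how the folder peeks inside constant aggregates. A sketch assuming ConstantArray and ArrayType (not listed above) in addition to these entries:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

static bool aggregateSketch(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *Elts[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, 7)};
  Constant *Arr = ConstantArray::get(ArrayType::get(I32, 2), Elts);
  Constant *E0 = Arr->getAggregateElement(0u);  // the i32 0 element
  return E0 && E0->isNullValue();               // true
}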
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper for a value that functionally represents the original function.
This class represents an Operation in the Expression.
A parsed version of the target data layout string, and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
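A tiny sketch of the FixedVectorType entries above (illustrative function name):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

static unsigned vectorSketch(LLVMContext &Ctx) {
  auto *VTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4); // <4 x float>
  return VTy->getNumElements();                               // 4
}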
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
bool hasNoUnsignedSignedWrap() const
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
PointerType * getType() const
Global values are always pointers.
const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the program.
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of the global (this can happen due to weak linkage) are guaranteed to have the same initializer.
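These two predicates are the guard a folder needs before trusting a global's initializer. A minimal sketch (the function name is illustrative):

#include "llvm/IR/GlobalVariable.h"
using namespace llvm;

static const Constant *readableInitializer(const GlobalVariable &GV) {
  // The value must be immutable at run time, and no other definition of the
  // global (e.g. a weak one) may provide a different initializer.
  if (GV.isConstant() && GV.hasDefinitiveInitializer())
    return GV.getInitializer();
  return nullptr;
}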
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
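ICmpInst::compare evaluates a predicate directly on APInt values; note that the same bits compare differently under signed and unsigned predicates. An illustrative sketch:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool cmpSketch() {
  APInt A(8, 200), B(8, 100);
  bool U = ICmpInst::compare(A, B, ICmpInst::ICMP_UGT); // 200 > 100: true
  bool S = ICmpInst::compare(A, B, ICmpInst::ICMP_SLT); // -56 < 100: true
  return U && S;
}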
const Function * getFunction() const
Return the function this instruction belongs to.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic: they operate on fixed-bitwidth values, so there is a certain threshold value at which the result saturates.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout structure.
unsigned getElementContainingOffset(uint64_t FixedOffset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFP128Ty() const
Return true if this is 'fp128'.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeID getTypeID() const
Return the type id for the type.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has an IEEE-compatible layout as defined by isIEEE().
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
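The Type predicates above are how the folder classifies operands before dispatching. A sketch of the common pattern (peel vectors to their scalar element, then branch; the function name is illustrative):

#include "llvm/IR/Type.h"
using namespace llvm;

static unsigned scalarBitsSketch(Type *Ty) {
  Type *Scalar = Ty->getScalarType();  // element type for vectors, else Ty
  if (Scalar->isIntegerTy())
    return Scalar->getIntegerBitWidth();
  if (Scalar->isFloatingPointTy())
    return Scalar->getScalarSizeInBits();
  return 0;  // pointers, aggregates, etc.
}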
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
LLVMContext & getContext() const
All values hold a context through their type.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ CE
Windows NT (Windows on ARM)
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
std::error_code status(const Twine &path, file_status &result, bool follow=true)
Get file status as if by POSIX stat().
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Constant * ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS, Constant *RHS, Type *Ty, Instruction *FMFSource)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - try to cast constant to destination type, returning null if unsuccessful.
static double log2(double V)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
unsigned getPointerAddressSpace(const Type *T)
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldCompareInstruction(CmpInst::Predicate Predicate, Constant *C1, Constant *C2)
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value V, returning the original object being addressed.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating-point constant according to the denormal mode set in the instruction's parent function.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the specified operands and indices.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinable.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrower than C's type.
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
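The ConstantFold* entry points above share one convention: they return the folded Constant, or nullptr when no fold applies, so results compose with early exits. A hedged sketch chaining two of them (the function name is illustrative):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Fold `add L, R`, then narrow the result to the given integer type.
static Constant *foldAddThenTrunc(Constant *L, Constant *R, Type *I16Ty,
                                  const DataLayout &DL) {
  if (Constant *Sum = ConstantFoldBinaryOpOperands(Instruction::Add, L, R, DL))
    return ConstantFoldIntegerCast(Sum, I16Ty, /*IsSigned=*/false, DL);
  return nullptr;
}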
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
static constexpr DenormalMode getIEEE()
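A sketch of how the DenormalMode fields above are consulted; this mirrors the question FlushFPConstant answers, but the helper itself is illustrative:

#include "llvm/ADT/FloatingPointMode.h"
using namespace llvm;

// Whether a denormal input may be replaced by a zero of some sign.
// DenormalMode::getIEEE() models the default (no flushing).
static bool inputFlushedToZero(DenormalMode Mode) {
  return Mode.Input == DenormalMode::PreserveSign ||
         Mode.Input == DenormalMode::PositiveZero;
}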
Incoming for lane mask phi as machine instruction; incoming register Reg and incoming block Block are...
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
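KnownBits ties back to computeKnownBits above: when every bit of a value is known, the value folds to a plain constant. A sketch using the KnownBits-returning overload of computeKnownBits (the function name is illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool knownConstant(const Value *V, const DataLayout &DL, APInt &Out) {
  KnownBits Known = computeKnownBits(V, DL);
  if (!Known.isConstant())
    return false;          // some bits of V are still unknown
  Out = Known.getConstant();
  return true;
}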