#include "llvm/Config/config.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      // ...
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    // ...
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
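// Worked example (editorial sketch, not part of the original file): packing
// <4 x i8> <0x01, 0x02, 0x03, 0x04> into an i32 on a little-endian target
// reads the elements highest-index-first, shifting left by BitShift = 8
// each round, so Result ends up as 0x04030201 -- element 0 occupies the
// least significant byte, matching the in-memory layout.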
106 "Invalid constantexpr bitcast!");
112 if (
auto *VTy = dyn_cast<VectorType>(
C->getType())) {
115 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116 Type *SrcEltTy = VTy->getElementType();
129 if (
Constant *CE = foldConstVectorToAPInt(Result, DestTy,
C,
130 SrcEltTy, NumSrcElts,
DL))
133 if (isa<IntegerType>(DestTy))
134 return ConstantInt::get(DestTy, Result);
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  // ...
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    // ...
  }

  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();
  if (!isa<ConstantVector>(C) &&
      !isa<ConstantDataVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // ...
  bool isLittleEndian = DL.isLittleEndian();

  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    unsigned Ratio = NumSrcElt / NumDstElt;
    // ...
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      // ...
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        // ...
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
        // ...
      }
      assert(Elt && "Constant folding cannot fail on plain integers");
  unsigned Ratio = NumDstElt / NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);
    // ...
    if (isa<UndefValue>(Element)) {
      // ...
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    // ...
    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
      // ...
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
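// Worked example (illustrative only, not from the original file): splitting
// i64 0x0123456789ABCDEF into <2 x i32> on a little-endian target starts
// with ShiftAmt = 0, so element 0 becomes trunc(lshr(V, 0)) = 0x89ABCDEF
// and element 1 becomes trunc(lshr(V, 32)) = 0x01234567 -- the low half
// lands in the lower-indexed element, as it would in memory.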
  if ((GV = dyn_cast<GlobalValue>(C))) {
    // ...
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    // ...
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    // ...
  }

  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    // ...

  auto *GEP = dyn_cast<GEPOperator>(CE);
  // ...
  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  // ...
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
  Type *SrcTy = C->getType();
  // ...
  TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
  TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
  if (!TypeSize::isKnownGE(SrcSize, DestSize))
    return nullptr;
  // ...
  if (SrcSize == DestSize &&
      // ...
    Cast = Instruction::IntToPtr;
  // ...
    Cast = Instruction::PtrToInt;
  // ...
      ElemC = C->getAggregateElement(Elem++);
    } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
    if (auto *VT = dyn_cast<VectorType>(SrcTy))
      if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
        return nullptr;

    C = C->getAggregateElement(0u);
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth() / 8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      // ...
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      // ...
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      // ...
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    // ...
    ByteOffset -= CurEltOffset;
    // ...
    uint64_t EltSize =
        DL.getTypeAllocSize(CS->getOperand(Index)->getType());

    if (ByteOffset < EltSize &&
        !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                            BytesLeft, DL))
      return false;
    // ...
    if (Index == CS->getType()->getNumElements())
      return true;
    // ...
    if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
      return true;

    // Move to the next element of the struct.
    CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
    BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
    // ...
    CurEltOffset = NextEltOffset;
  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    // ...
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // ...
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    // ...
    if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                            BytesLeft, DL))
      return false;
    // ...
    assert(BytesWritten <= EltSize && "Not indexing into this element?");
    if (BytesWritten >= BytesLeft)
      return true;

    BytesLeft -= BytesWritten;
    CurPtr += BytesWritten;
  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);
  // ...
      DL.getTypeSizeInBits(LoadTy).getFixedValue());
  // ...
  if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
    // ...
  if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;
  // ...
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);
  // ...
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  // ...
  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;
  // ...
  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
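// Worked example (editorial sketch, not in the original): loading an i32
// from raw initializer bytes {0xEF, 0xBE, 0xAD, 0xDE} on a little-endian
// target seeds ResultVal with RawBytes[3] = 0xDE and then shifts in bytes
// 2, 1, and 0, producing 0xDEADBEEF; the big-endian branch walks the
// buffer forward instead, so the same bytes would yield 0xEFBEADDE.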
  if (NBytes > UINT16_MAX)
    return nullptr;
  // ...
  unsigned char *CurPtr = RawBytes.data();
  // ...
  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;
  // ...
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;
  // ...
  if (Index.isNegative() || Index.getActiveBits() >= 32)
    return nullptr;

  C = C->getAggregateElement(Index.getZExtValue());
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // ...
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
    return nullptr;
  // ...
  if (C->isAllOnesValue() &&
      // ...
  if (Opc == Instruction::And) {
    // ...
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
    // ...
  }

  // ...
  if (Opc == Instruction::Sub) {
    // ...
    unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
static Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                                Type *ResultTy, bool InBounds,
                                std::optional<unsigned> InRangeIndex,
                                const DataLayout &DL,
                                const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  // ...
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      // ...
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
  // ...
  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, InBounds, InRangeIndex);
  bool InBounds = GEP->isInBounds();
  // ...
  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  // ...
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->isInBounds(), GEP->getInRangeIndex(),
                                   DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  // ...
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));
  // ...
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    // ...
    InBounds &= GEP->isInBounds();
    // ...
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    // ...
    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    // ...
  }
  // ...
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  Type *ElemTy = SrcElemTy;
  // ...
  while (ElemTy != ResElemTy) {
    // ...
  }

  // Preserve the inrange index from the innermost GEP if possible.
  std::optional<unsigned> InRangeIndex;
  if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }
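// Big-picture note (editorial, hedged): the fragments above come from
// SymbolicallyEvaluateGEP, which collapses a chain of constant GEPs into a
// single byte offset and then re-synthesizes an index list against the
// source element type. For example, gep(gep @arr, i64 3), i64 4 over i32
// elements accumulates (3 + 4) * 4 = 28 bytes, i.e. a single GEP with
// index 7.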
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  // ...
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
  // ...
  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    // ...
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
    if (CE->isCompare())
      // ...
    return CE->getWithOperands(Ops);
  }
  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);
  }
  case Instruction::Freeze:
    // ...
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      // ...
    }
    return nullptr;
  case Instruction::Select:
    // ...
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return C;

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res =
            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
      return Res;
    return CE;
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
  if (auto *PN = dyn_cast<PHINode>(I)) {
    // ...
    C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
    // ...
    if (CommonValue && C != CommonValue)
      return nullptr;
    // ...
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }
  // ...
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
  // ...
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    // ...
    if (CE0->getOpcode() == Instruction::IntToPtr) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
      // ...
    }

    if (CE0->getOpcode() == Instruction::PtrToInt) {
      Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
      if (CE0->getType() == IntPtrTy) {
        // ...
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
          // ...
        }

        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }
    // ...
    unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
    APInt Offset0(IndexWidth, 0);
    // ...
    APInt Offset1(IndexWidth, 0);
    // ...
    if (Stripped0 == Stripped1)
      return ConstantFoldCompareInstOperands(
          ICmpInst::getSignedPredicate(Predicate),
          ConstantInt::get(CE0->getContext(), Offset0),
          ConstantInt::get(CE0->getContext(), Offset1), DL, TLI);
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression but the LHS isn't, swap the operands.
    // ...
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
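// Illustrative note (editorial, not from the original file): when both
// operands strip down to the same base pointer, the pointer comparison
// reduces to a comparison of the accumulated byte offsets. For example,
//   icmp ult (gep inbounds i8, ptr @g, i64 4), (gep inbounds i8, ptr @g, i64 8)
// strips both sides to @g with offsets 4 and 8, so it folds to the signed
// form of the predicate on the offsets -- icmp slt 4, 8 -- i.e. true.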
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;
  // ...
  if (!I || !I->getParent() || !I->getFunction())
    return Operand;

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  if (!CFP)
    return Operand;
  // ...
      return ConstantFP::get(
          Ty->getContext(),
          APFloat::getZero(Ty->getFltSemantics(), APF.isNegative()));
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      // ...
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // ...
            DL.getIntPtrType(CE->getType()),
        // ...
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // ...
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              // ...
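// Editorial sketch (hedged): the branch above recognizes the idiom
// ptrtoint (gep i8, Ptr, (sub 0, V)) and folds it toward
// sub (ptrtoint Ptr), V -- a negated byte index off a base pointer is just
// the pointer value minus V once the GEP is over i8 (byte-sized) elements.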
  case Instruction::IntToPtr:
    // ...
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          // ...
        }
      }
    }
    // ...
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // ...
  case Instruction::BitCast:
    // ...
  // ...
  Type *SrcTy = C->getType();
  if (SrcTy == DestTy)
    return C;
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // ...
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // ...
  case Intrinsic::amdgcn_perm:
  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
  case Intrinsic::amdgcn_s_wqm:
  case Intrinsic::amdgcn_s_quadmask:
  case Intrinsic::amdgcn_s_bitreplicate:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // ...
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;

  // ...
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::exp10:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::ldexp:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::frexp:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_sin:
  // ...
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();
  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::is_fpclass:
  // Non-constrained variants of rounding operations mean the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::canonicalize:
    return true;
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
    return true;
  if (!F->hasName() || Call->isStrictFP())
    return false;

  // ...
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  }
  if (Name.size() < 12 || Name[1] != '_')
    return false;
  switch (Name[2]) {
  default:
    return false;
  case 'a':
    return Name == "__acos_finite" || Name == "__acosf_finite" ||
           Name == "__asin_finite" || Name == "__asinf_finite" ||
           Name == "__atan2_finite" || Name == "__atan2f_finite";
  case 'c':
    return Name == "__cosh_finite" || Name == "__coshf_finite";
  case 'e':
    return Name == "__exp_finite" || Name == "__expf_finite" ||
           Name == "__exp2_finite" || Name == "__exp2f_finite";
  case 'l':
    return Name == "__log_finite" || Name == "__logf_finite" ||
           Name == "__log10_finite" || Name == "__log10f_finite";
  case 'p':
    return Name == "__pow_finite" || Name == "__powf_finite";
  case 's':
    return Name == "__sinh_finite" || Name == "__sinhf_finite";
  }
  APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
  return ConstantFP::get(Ty->getContext(), APF);
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
                         Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}
Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                               const APFloat &V, const APFloat &W, Type *Ty) {
  llvm_fenv_clearexcept();
  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(Result, Ty);
}
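// Editorial note (hedged): the helpers above fold libm-style calls by
// actually evaluating the host's math function, then rejecting the result
// if the call left errno set (EDOM/ERANGE) or raised any FP exception
// other than FE_INEXACT. For example, ConstantFoldFP(log, APFloat(-1.0),
// Ty) computes log(-1.0) on the host, observes the domain error, and
// returns nullptr rather than folding to a NaN constant.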
  if (isa<ConstantAggregateZero>(Op))
    return ConstantInt::get(VT->getElementType(), 0);

  // This is the same as the underlying binops - poison propagates.
  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
    return PoisonValue::get(VT->getElementType());

  // ...
  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
    return nullptr;

  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!EltC)
    return nullptr;

  APInt Acc = EltC->getValue();
  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = EltC->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::vector_reduce_and:
      Acc &= X;
      break;
    case Intrinsic::vector_reduce_or:
      Acc |= X;
      break;
    case Intrinsic::vector_reduce_xor:
      Acc ^= X;
      break;
    case Intrinsic::vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
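// Worked example (illustrative, not from the original): folding
// llvm.vector.reduce.add on <4 x i32> <i32 1, i32 2, i32 3, i32 4> seeds
// Acc with element 0 (1) and accumulates 2, 3, and 4, producing i32 10;
// vector_reduce_umax over the same vector keeps the running
// APIntOps::umax instead and yields 4.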
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // ...
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");
  // ...
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

static double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();
  // ...
  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();
  // ...
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    // ...
  }
  // ...
  if (St == APFloat::opStatus::opOK)
    return true;

  // If evaluation raised an FP exception, the result can depend on rounding
  // mode. If the latter is unknown, folding is not possible.
  if (ORM && *ORM == RoundingMode::Dynamic)
    return false;

  // If FP exceptions are ignored, fold the call, even if such an exception
  // is raised.
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
    return true;
  // ...
  if (!ORM || *ORM == RoundingMode::Dynamic)
    // ...
    return RoundingMode::NearestTiesToEven;
  // ...
    return ConstantFP::get(
        // ...
  // ...
  if (Src.isNormal() || Src.isInfinity())
    return ConstantFP::get(CI->getContext(), Src);
  // ...
    return ConstantFP::get(CI->getContext(), Src);
  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (Operands[0]->isManifestConstant())
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }

  if (isa<PoisonValue>(Operands[0])) {
    // ...
    if (IntrinsicID == Intrinsic::canonicalize)
      return PoisonValue::get(Ty);
  }

  if (isa<UndefValue>(Operands[0])) {
    // ...
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop ||
        IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat ||
        IntrinsicID == Intrinsic::canonicalize)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // ...
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }
    // ...
    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
      // ...
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);
      // ...
    }

    if (IntrinsicID == Intrinsic::fptoui_sat ||
        IntrinsicID == Intrinsic::fptosi_sat) {
      // convertToInteger() already maps NaN to zero and saturates.
      APSInt Int(Ty->getIntegerBitWidth(),
                 IntrinsicID == Intrinsic::fptoui_sat);
      bool IsExact;
      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
      return ConstantInt::get(Ty, Int);
    }

    if (IntrinsicID == Intrinsic::canonicalize)
      return constantFoldCanonicalize(Ty, Call, U);
    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      // ...
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // ...
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }
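// Editorial note (hedged): amdgcn_fract folds to min(x - floor(x),
// nextDown(1.0)), matching the OpenCL fract() definition; the clamp keeps
// fract(-epsilon) from folding to exactly 1.0. For example, fract(-0.25)
// is -0.25 - (-1.0) = 0.75, while fract(-1e-9) clamps to the largest
// float strictly below 1.0.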
    std::optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || *RM == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      // ...
      if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
          St == APFloat::opInexact) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        // ...
      } else if (U.isSignaling()) {
        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::log:
      return ConstantFoldFP(log, APF, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log2, APF, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, APF, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, APF, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
    case Intrinsic::exp10:
      // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, APF, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, APF, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, APF, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin: {
      double V = getValueAsDouble(Op);
      if (V < -256.0 || V > 256.0)
        // ...
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      }
      // ...
      return GetConstantFoldFPValue(V, Ty);
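// Worked example (illustrative): amdgcn_sin folds sin(V * 2*pi). For
// V = 0.25, V4 = 1.0 is integral, so the table lookup SinVals[1] yields
// exactly 1.0 (sin(pi/2)); amdgcn_cos shifts the index by one, so cos of
// the same quarter turn reads SinVals[2] = 0.0 exactly.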
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, APF, Ty);
      break;
    // ...
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, APF, Ty);
      break;
    // ...
      if (TLI->has(Func))
        return ConstantFoldFP(atan, APF, Ty);
      break;
    // ...
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    // ...
      if (TLI->has(Func))
        return ConstantFoldFP(cos, APF, Ty);
      break;
    // ...
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, APF, Ty);
      break;
    // ...
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, APF, Ty);
      break;
    // ...
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
      break;
    // ...
      if (TLI->has(Func)) {
        // ...
      }
      break;
    // ...
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    // ...
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log, APF, Ty);
      break;
    // ...
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log2, APF, Ty);
      break;
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
        return ConstantFoldFP(log10, APF, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    // ...
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    // ...
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    // ...
      if (TLI->has(Func))
        return ConstantFoldFP(sin, APF, Ty);
      break;
    // ...
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, APF, Ty);
      break;
    // ...
      if (!APF.isNegative() && TLI->has(Func))
        return ConstantFoldFP(sqrt, APF, Ty);
      break;
    // ...
      if (TLI->has(Func))
        return ConstantFoldFP(tan, APF, Ty);
      break;
    // ...
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, APF, Ty);
      break;
    // ...
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().popcount());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());
      // ...
      assert(status != APFloat::opInexact && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }

    case Intrinsic::amdgcn_s_wqm: {
      uint64_t Val = Op->getZExtValue();
      Val |= (Val & 0x5555555555555555ULL) << 1 |
             ((Val >> 1) & 0x5555555555555555ULL);
      Val |= (Val & 0x3333333333333333ULL) << 2 |
             ((Val >> 2) & 0x3333333333333333ULL);
      return ConstantInt::get(Ty, Val);
    }

    case Intrinsic::amdgcn_s_quadmask: {
      uint64_t Val = Op->getZExtValue();
      uint64_t QuadMask = 0;
      for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
        if (!(Val & 0xF))
          continue;

        QuadMask |= (1ULL << I);
      }
      return ConstantInt::get(Ty, QuadMask);
    }

    case Intrinsic::amdgcn_s_bitreplicate: {
      uint64_t Val = Op->getZExtValue();
      Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
      Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
      Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
      Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
      Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
      Val = Val | Val << 1;
      return ConstantInt::get(Ty, Val);
    }
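// Worked example (illustrative): amdgcn_s_wqm spreads any set bit across
// its 4-bit quad, so 0b0001 becomes 0b1111 (pairs first, then nibbles);
// amdgcn_s_quadmask compresses each nonzero nibble to one bit, mapping
// 0x00F0 to 0b10; and amdgcn_s_bitreplicate doubles every low-32 bit, so
// 0b10 becomes 0b1100 (input bit i lands in result bits 2i and 2i+1).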
  switch (IntrinsicID) {
  // ...
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    // ...
  }
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    // ...
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    }
  }
  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
  // ...
  if (FCmp->isSignaling()) {
    if (Op1.isNaN() || Op2.isNaN())
      St = APFloat::opInvalidOp;
  } else {
    if (Op1.isSignaling() || Op2.isSignaling())
      St = APFloat::opInvalidOp;
  }
  // ...
    return ConstantInt::get(Call->getType()->getScalarType(), Result);
  const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
  if (!Op1)
    return nullptr;

  const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
  if (!Op2)
    return nullptr;

  const APFloat &Op1V = Op1->getValueAPF();
  const APFloat &Op2V = Op2->getValueAPF();

  switch (Func) {
  default:
    break;
  case LibFunc_pow_finite:
  case LibFunc_powf_finite:
    if (TLI->has(Func))
      return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
    break;
  // ...
    if (TLI->has(Func)) {
      APFloat V = Op1->getValueAPF();
      if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
        return ConstantFP::get(Ty->getContext(), V);
    }
    break;
  case LibFunc_remainder:
  case LibFunc_remainderf:
    if (TLI->has(Func)) {
      APFloat V = Op1->getValueAPF();
      if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
        return ConstantFP::get(Ty->getContext(), V);
    }
    break;
  case LibFunc_atan2f:
  // ...
  case LibFunc_atan2_finite:
  case LibFunc_atan2f_finite:
    if (TLI->has(Func))
      return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
    break;
  }
  bool IsOp0Undef = isa<UndefValue>(Operands[0]);
  bool IsOp1Undef = isa<UndefValue>(Operands[1]);
  switch (IntrinsicID) {
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
    // ...
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    const APFloat &Op1V = Op1->getValueAPF();

    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();

      if (const auto *ConstrIntr =
              dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
        // ...
        switch (IntrinsicID) {
        default:
          return nullptr;
        case Intrinsic::experimental_constrained_fadd:
          St = Res.add(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fsub:
          St = Res.subtract(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fmul:
          St = Res.multiply(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_fdiv:
          St = Res.divide(Op2V, RM);
          break;
        case Intrinsic::experimental_constrained_frem:
          St = Res.mod(Op2V);
          break;
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps:
          return evaluateCompare(Op1V, Op2V, ConstrIntr);
        }
        // ...
          return ConstantFP::get(Ty->getContext(), Res);
        // ...
      }

      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::copysign:
        // ...
      case Intrinsic::minnum:
        // ...
      case Intrinsic::maxnum:
        // ...
      case Intrinsic::minimum:
        // ...
      case Intrinsic::maximum:
        // ...
      }
      // ...
      switch (IntrinsicID) {
      default:
        break;
      case Intrinsic::pow:
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      case Intrinsic::amdgcn_fmul_legacy:
        // ...
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }
      // ...
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      switch (IntrinsicID) {
      case Intrinsic::ldexp: {
        return ConstantFP::get(
            Ty->getContext(),
            scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
      }
      case Intrinsic::is_fpclass: {
        // ...
        return ConstantInt::get(Ty, Result);
      }
      // ...
      }

      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V.convertToDouble(),
                                    (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow(Op1V.convertToDouble(),
                                     (int)Op2C->getZExtValue())));
  const APInt *C0, *C1;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1))
    return nullptr;

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
    // ...
    return ConstantInt::get(
        Ty, ICmpInst::compare(*C0, *C1,
                              MinMaxIntrinsic::getPredicate(IntrinsicID))
                ? *C0
                : *C1);

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // ...
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
    // ...
    if (!C0 || !C1) {
      return ConstantStruct::get(
          cast<StructType>(Ty),
          // ...
    }
    // ...
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    // ...
    APInt Res;
    bool Overflow;
    switch (IntrinsicID) {
    default:
      llvm_unreachable("Invalid case");
    case Intrinsic::sadd_with_overflow:
      Res = C0->sadd_ov(*C1, Overflow);
      break;
    case Intrinsic::uadd_with_overflow:
      Res = C0->uadd_ov(*C1, Overflow);
      break;
    case Intrinsic::ssub_with_overflow:
      Res = C0->ssub_ov(*C1, Overflow);
      break;
    case Intrinsic::usub_with_overflow:
      Res = C0->usub_ov(*C1, Overflow);
      break;
    case Intrinsic::smul_with_overflow:
      Res = C0->smul_ov(*C1, Overflow);
      break;
    case Intrinsic::umul_with_overflow:
      Res = C0->umul_ov(*C1, Overflow);
      break;
    }
    // ...
  }
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    // ...
    if (IntrinsicID == Intrinsic::uadd_sat)
      return ConstantInt::get(Ty, C0->uadd_sat(*C1));
    return ConstantInt::get(Ty, C0->sadd_sat(*C1));
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    // ...
    if (IntrinsicID == Intrinsic::usub_sat)
      return ConstantInt::get(Ty, C0->usub_sat(*C1));
    return ConstantInt::get(Ty, C0->ssub_sat(*C1));
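// Worked example (illustrative): on i8, uadd_sat(250, 10) clamps the
// overflowing sum 260 to 255, and ssub_sat(-120, 50) clamps -170 to the
// signed minimum -128; the APInt helpers uadd_sat/ssub_sat implement
// exactly this clamping.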
  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    assert(C1 && "Must be constant int");
    // ...
    if (IntrinsicID == Intrinsic::cttz)
      return ConstantInt::get(Ty, C0->countr_zero());
    return ConstantInt::get(Ty, C0->countl_zero());

  case Intrinsic::abs:
    assert(C1 && "Must be constant int");
    // ...
    return ConstantInt::get(Ty, C0->abs());

  case Intrinsic::amdgcn_wave_reduce_umin:
  case Intrinsic::amdgcn_wave_reduce_umax:
    return dyn_cast<Constant>(Operands[0]);
  }
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // ...
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    // ...
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/ false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/ false);
      break;
    }
  }
  if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
  switch (IntrinsicID) {
  // ...
  case Intrinsic::amdgcn_cubeid:
    // ...
  case Intrinsic::amdgcn_cubema:
    // ...
  case Intrinsic::amdgcn_cubesc:
    // ...
  case Intrinsic::amdgcn_cubetc:
    // ...
  }
  const APInt *C0, *C1, *C2;
  if (!getConstIntOrUndef(Operands[0], C0) ||
      !getConstIntOrUndef(Operands[1], C1) ||
      !getConstIntOrUndef(Operands[2], C2))
    return nullptr;
  // ...
  unsigned NumUndefBytes = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    // ...
    const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
    // ...
      B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
    // ...
      B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
    // ...
    Val.insertBits(B, I, 8);
  }

  if (NumUndefBytes == 4)
    return UndefValue::get(Ty);

  return ConstantInt::get(Ty, Val);
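// Editorial note (hedged): amdgcn_perm assembles each result byte from an
// 8-bit selector. Small selector values pick one of the eight source bytes
// of the {C0, C1} pair (the (Sel & 10)/(Sel & 12) test above chooses which
// dword), and larger values replicate a sign bit or produce a constant
// byte -- hence the extractBitsAsZExtValue(1, 31 or 15) * 0xff form for
// the sign-replication case.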
  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        const APFloat &C3 = Op3->getValueAPF();

        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          // ...
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }
        switch (IntrinsicID) {
        default:
          break;
        case Intrinsic::amdgcn_fma_legacy: {
          // ...
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    // ...
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;
    // ...
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      // ...
    }
  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    // The shift amount is interpreted as modulo the bitwidth.
    // ...
    // fshl: X << (Z % BW) | Y >> (BW - (Z % BW))
    // fshr: X << (BW - (Z % BW)) | Y >> (Z % BW)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }
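// Worked example (illustrative): fshl(i8 0xAB, i8 0xCD, 4) conceptually
// concatenates 0xABCD, shifts left by 4 % 8 = 4, and keeps the high byte:
// (0xAB << 4) | (0xCD >> 4) = 0xB0 | 0x0C = 0xBC.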
  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
  // ...
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
  // ...
    if (Constant *FoldedLibCall =
            ConstantFoldLibCall2(Name, Ty, Operands, TLI))
      return FoldedLibCall;
    return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
  // ...
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  // ...
  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    // ...
      auto *MaskElt = Mask->getAggregateElement(I);
      // ...
      auto *PassthruElt = Passthru->getAggregateElement(I);
      // ...
      if (isa<UndefValue>(MaskElt)) {
        // ...
      }
      if (MaskElt->isNullValue()) {
        // ...
      } else if (MaskElt->isOneValue()) {
        // ...
      }
    // ...
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      // ...
      for (unsigned i = 0; i < Lanes; i++) {
        // ...
      }
    }
    // ...
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    // ...
    uint64_t Limit = Op1->getZExtValue();
    // ...
    for (unsigned i = 0; i < Lanes; i++) {
      if (Base + i < Limit)
        // ...
    }
    // ...
  }
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // ...
    }
    // ...
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
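// Editorial note (hedged): the loop above scalarizes a fixed-vector call by
// gathering lane J of every operand into Lane and folding each column with
// the scalar folder; e.g. llvm.smax on <2 x i32> <1, 8> and <2 x i32>
// <5, 2> folds lane-by-lane to <2 x i32> <5, 8>.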
static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;
    // ...
  }
  // ...
  }
  return nullptr;
}

static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);

  // The exponent is an unspecified value for inf/nan, so use zero rather
  // than undef.
  Constant *Result1 = FrexpMant.isFinite() ? ConstantInt::get(IntTy, FrexpExp)
                                           : ConstantInt::getNullValue(IntTy);
  return {Result0, Result1};
}
  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    // ...
    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      // ...
      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        Constant *Lane = Operands[0]->getAggregateElement(I);
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);
      }
      // ...
    }

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
    // ...
  }
  default:
    // ...
    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
  }
  return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
                                    dyn_cast_if_present<CallBase>(FMFSource));
  // ...
  if (Call->isNoBuiltin())
    return nullptr;
  // ...
  Type *Ty = F->getReturnType();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, IID, FVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, IID, SVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);

  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);
  // ...
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  // ...
  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      // ...
      case LibFunc_log10l:
      // ...
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
      // ...
        if (OpC->getType()->isDoubleTy())
          // ...
        if (OpC->getType()->isFloatTy())
          // ...
        break;
      // ...
        if (OpC->getType()->isDoubleTy())
          // ...
        if (OpC->getType()->isFloatTy())
          // ...
        break;
      // ...
        return !Op.isInfinity();
      // ...
      case LibFunc_tanf: {
        // ...
        Type *Ty = OpC->getType();
        // ...
        return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
      }
      // ...
        if (OpC->getType()->isDoubleTy())
          // ...
        if (OpC->getType()->isFloatTy())
          // ...
        break;
      // ...
        return Op.isNaN() || Op.isZero() || !Op.isNegative();
      // ...
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    // ...
      case LibFunc_powf: {
        // ...
        return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
      }
      // ...
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        // ...
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // ...
    }
  }
  // ...

void TargetFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Constant * FoldBitCast(Constant *V, Type *DestTy)
Constant * getConstantAtOffset(Constant *Base, APInt Offset, const DataLayout &DL)
If this Offset points exactly to the start of an aggregate element, return that element,...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
amode Optimize addressing mode
mir Rename Register Operands
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
double convertToDouble() const
Converts this APFloat to host double value.
bool isPosInfinity() const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
APInt bitcastToAPInt() const
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus mod(const APFloat &RHS)
bool isNegInfinity() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt uadd_sat(const APInt &RHS) const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static bool isDesirableCastOp(unsigned Opcode)
Whether creating a constant expression for this cast is desirable.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< unsigned > InRangeIndex=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible.
static bool isDesirableBinOp(unsigned Opcode)
Whether creating a constant expression for this binary operator is desirable.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getCompare(unsigned short pred, Constant *C1, Constant *C2, bool OnlyIfReduced=false)
Return an ICmp or FCmp comparison operator constant expression.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
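A small sketch combining getAggregateElement and isNullValue; the loop works because getAggregateElement returns null once the index runs past the last element (the helper is illustrative, and C is assumed to be a fixed-size aggregate):

#include "llvm/IR/Constants.h"

static unsigned countZeroElements(const llvm::Constant *C) {
  unsigned Zeros = 0;
  for (unsigned Elt = 0;; ++Elt) {
    llvm::Constant *E = C->getAggregateElement(Elt);
    if (!E)
      break; // index out of range (or C is not an aggregate at all)
    if (E->isNullValue())
      ++Zeros;
  }
  return Zeros;
}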
Constrained floating point compare intrinsics.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
Wrapper for a value that functionally represents the original function.
A parsed version of the target data layout string, and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
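A one-line sketch of the factory and its accessor (the wrapper and assert are illustrative):

#include <cassert>
#include "llvm/IR/DerivedTypes.h"

// Build <4 x i32> and read back its compile-time-fixed element count.
static llvm::FixedVectorType *makeV4I32(llvm::LLVMContext &Ctx) {
  llvm::FixedVectorType *VTy =
      llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 4);
  assert(VTy->getNumElements() == 4);
  return VTy;
}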
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Type * getSourceElementType() const
std::optional< unsigned > getInRangeIndex() const
Returns the offset of the index with an inrange attachment, or std::nullopt if none.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the program.
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
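An illustrative check of ICmpInst::compare above: the same bit pattern orders differently under signed and unsigned predicates (the demo function is hypothetical):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"

static void signedVsUnsigned() {
  llvm::APInt A(8, 0x80); // -128 as signed, 128 as unsigned
  llvm::APInt B(8, 0x01);
  bool SLT = llvm::ICmpInst::compare(A, B, llvm::ICmpInst::ICMP_SLT); // true
  bool ULT = llvm::ICmpInst::compare(A, B, llvm::ICmpInst::ICMP_ULT); // false
  (void)SLT;
  (void)ULT;
}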
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain threshold...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Class to represent scalable SIMD vectors.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout structure.
unsigned getElementContainingOffset(uint64_t FixedOffset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
TypeSize getElementOffset(unsigned Idx) const
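A sketch of how these two queries usually pair up: map a byte offset into a struct to a field index plus an offset within that field (the helper is hypothetical; STy must be a sized struct and Offset must lie inside it):

#include <cstdint>
#include <utility>
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

static std::pair<unsigned, uint64_t>
splitOffset(const llvm::DataLayout &DL, llvm::StructType *STy,
            uint64_t Offset) {
  const llvm::StructLayout *SL = DL.getStructLayout(STy);
  unsigned Idx = SL->getElementContainingOffset(Offset);
  uint64_t FieldStart = SL->getElementOffset(Idx).getFixedValue();
  return {Idx, Offset - FieldStart};
}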
Class to represent struct types.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
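These two typically appear together, as in this illustrative helper: getLibFunc maps a name to a LibFunc enum, and has() confirms the current target actually provides it:

#include "llvm/Analysis/TargetLibraryInfo.h"

static bool isAvailableLibFunc(const llvm::TargetLibraryInfo &TLI,
                               llvm::StringRef Name) {
  llvm::LibFunc LF;
  return TLI.getLibFunc(Name, LF) && TLI.has(LF);
}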
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has an IEEE-compatible layout as defined by...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
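A sketch of the scalar/vector-agnostic idiom these predicates enable: getScalarType reduces a vector type to its element, so one check covers i32 and <n x i32> alike (the helper is illustrative):

#include "llvm/IR/Type.h"

static bool isInt32Like(llvm::Type *Ty) {
  return Ty->isIntOrIntVectorTy() &&
         Ty->getScalarType()->getScalarSizeInBits() == 32;
}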
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
LLVMContext & getContext() const
All values hold a context through their type.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
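A sketch of why both signed and unsigned variants exist: once the sign bit is set, the two orderings disagree (the demo is illustrative):

#include <cassert>
#include "llvm/ADT/APInt.h"

static void minMaxDemo() {
  llvm::APInt A(8, 0x80), B(8, 0x7F); // A: -128 signed, 128 unsigned
  assert(&llvm::APIntOps::smin(A, B) == &A); // -128 < 127 when signed
  assert(&llvm::APIntOps::umax(A, B) == &A); // 128 > 127 when unsigned
}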
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Constant * ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS, Constant *RHS, Type *Ty, Instruction *FMFSource)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, const DataLayout &DL)
ConstantFoldLoadThroughBitcast - try to cast a constant to the destination type, returning null if unsuccessful.
static double log2(double V)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
unsigned getPointerAddressSpace(const Type *T)
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I)
Attempt to constant fold a floating point binary operation with the specified operands,...
Constant * ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V)
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
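A hedged sketch of a typical call: the Offset APInt is pre-sized to the pointer index width before the decomposition (the helper is hypothetical; C is assumed to be pointer-typed):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"

static bool decomposeGlobalOffset(llvm::Constant *C,
                                  const llvm::DataLayout &DL,
                                  llvm::GlobalValue *&GV, llvm::APInt &Off) {
  Off = llvm::APInt(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return llvm::IsConstantOffsetFromGlobal(C, GV, Off, DL); // C == GV + Off
}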
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value, returning the original object being addressed.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
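An illustrative use: folding `icmp ult i32 1, 2` yields an i1 true constant, while a null return would mean the compare could not be folded (the wrapper is hypothetical):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

static llvm::Constant *foldOneUltTwo(llvm::LLVMContext &Ctx,
                                     const llvm::DataLayout &DL) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  return llvm::ConstantFoldCompareInstOperands(
      llvm::ICmpInst::ICMP_ULT, llvm::ConstantInt::get(I32, 1),
      llvm::ConstantInt::get(I32, 2), DL);
}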
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
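A quick illustrative check of the frexp contract (fraction in [0.5, 1)): 6.0 splits as 0.75 * 2^3:

#include "llvm/ADT/APFloat.h"

static void frexpDemo() {
  int Exp = 0;
  llvm::APFloat Frac = llvm::frexp(
      llvm::APFloat(6.0), Exp, llvm::APFloat::rmNearestTiesToEven);
  // Frac is 0.75 and Exp is 3 here.
  (void)Frac;
}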
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
Constant * FlushFPConstant(Constant *Operand, const Instruction *I, bool IsOutput)
Attempt to flush a floating-point constant according to the denormal mode set in the instruction's parent function.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
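An illustrative fold of `add i32 2, 3` to `i32 5`; as with the other folders listed here, a null result signals that no fold was possible (the wrapper is hypothetical):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"

static llvm::Constant *foldTwoPlusThree(llvm::LLVMContext &Ctx,
                                        const llvm::DataLayout &DL) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  return llvm::ConstantFoldBinaryOpOperands(
      llvm::Instruction::Add, llvm::ConstantInt::get(I32, 2),
      llvm::ConstantInt::get(I32, 3), DL);
}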
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the specified operands and indices.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinable.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrower.
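A sketch of the sign-dependence: widening an i8 -1 to i32 gives -1 (sext) when IsSigned is true and 255 (zext) when it is false (the wrapper is illustrative):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"

static llvm::Constant *widenI8(llvm::Constant *C8, llvm::Type *I32Ty,
                               bool IsSigned, const llvm::DataLayout &DL) {
  // For C8 == i8 -1: IsSigned=true -> i32 -1, IsSigned=false -> i32 255.
  return llvm::ConstantFoldIntegerCast(C8, I32Ty, IsSigned, DL);
}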
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
opStatus
IEEE-754R 7: Default exception handling.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
DenormalModeKind
Represent handled modes for denormal (aka subnormal) modes in the floating point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating-point environment.
static constexpr DenormalMode getIEEE()
bool isConstant() const
Returns true if we know the value of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
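Tying these last two entries back to computeKnownBits above: when the analysis pins down every bit, the value is a provable constant even if it is not literally a ConstantInt. A minimal sketch, assuming V has type i32:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"

static bool knownToBeConstant(const llvm::Value *V,
                              const llvm::DataLayout &DL, llvm::APInt &Out) {
  llvm::KnownBits Known(32); // bit width must match V's type
  llvm::computeKnownBits(V, Known, DL); // fills Known.Zero / Known.One
  if (!Known.isConstant())
    return false;
  Out = Known.getConstant();
  return true;
}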