1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
54#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <limits>
58#include <optional>
59#include <utility>
60
61namespace llvm {
62
63class Function;
64class GlobalValue;
65class LLVMContext;
66class ScalarEvolution;
67class SCEV;
68class TargetMachine;
69
70extern cl::opt<unsigned> PartialUnrollingThreshold;
71
72/// Base class which can be used to help build a TTI implementation.
73///
74/// This class provides as much implementation of the TTI interface as is
75/// possible using the target independent parts of the code generator.
76///
77/// In order to subclass it, your class must implement a getST() method to
78/// return the subtarget, and a getTLI() method to return the target lowering.
79/// We need these methods implemented in the derived class so that this class
80/// doesn't have to duplicate storage for them.
81template <typename T>
82class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
83private:
84 using BaseT = TargetTransformInfoImplCRTPBase<T>;
85 using TTI = TargetTransformInfo;
86
87 /// Helper function to access this as a T.
88 const T *thisT() const { return static_cast<const T *>(this); }
89
90 /// Estimate a cost of Broadcast as an extract and sequence of insert
91 /// operations.
92 InstructionCost
93 getBroadcastShuffleOverhead(FixedVectorType *VTy,
94 TTI::TargetCostKind CostKind) const {
95 InstructionCost Cost = 0;
96 // Broadcast cost is equal to the cost of extracting the zeroth element
97 // plus the cost of inserting it into every element of the result vector.
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
99 CostKind, 0, nullptr, nullptr);
100
101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
103 CostKind, i, nullptr, nullptr);
104 }
105 return Cost;
106 }
107
108 /// Estimate a cost of shuffle as a sequence of extract and insert
109 /// operations.
110 InstructionCost
111 getPermuteShuffleOverhead(FixedVectorType *VTy,
112 TTI::TargetCostKind CostKind) const {
113 InstructionCost Cost = 0;
114 // Shuffle cost is equal to the cost of extracting elements from the
115 // arguments plus the cost of inserting them into the result vector.
116
117 // e.g. a <4 x float> shuffle with mask <0,5,2,7> extracts element 0 of the
118 // first vector, element 1 of the second vector, element 2 of the first
119 // vector and element 3 of the second vector, and inserts them at indices
120 // <0,1,2,3> of the result vector.
121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
123 CostKind, i, nullptr, nullptr);
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
125 CostKind, i, nullptr, nullptr);
126 }
127 return Cost;
128 }
129
130 /// Estimate a cost of subvector extraction as a sequence of extract and
131 /// insert operations.
132 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
133 TTI::TargetCostKind CostKind,
134 int Index,
135 FixedVectorType *SubVTy) const {
136 assert(VTy && SubVTy &&
137 "Can only extract subvectors from vectors");
138 int NumSubElts = SubVTy->getNumElements();
139 assert((!isa<FixedVectorType>(VTy) ||
140 (Index + NumSubElts) <=
141 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
142 "SK_ExtractSubvector index out of range");
143
144 InstructionCost Cost = 0;
145 // Subvector extraction cost is equal to the cost of extracting elements
146 // from the source type plus the cost of inserting them into the result
147 // vector type.
148 for (int i = 0; i != NumSubElts; ++i) {
149 Cost +=
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
153 CostKind, i, nullptr, nullptr);
154 }
155 return Cost;
156 }
157
158 /// Estimate a cost of subvector insertion as a sequence of extract and
159 /// insert operations.
160 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
161 TTI::TargetCostKind CostKind,
162 int Index,
163 FixedVectorType *SubVTy) const {
164 assert(VTy && SubVTy &&
165 "Can only insert subvectors into vectors");
166 int NumSubElts = SubVTy->getNumElements();
167 assert((!isa<FixedVectorType>(VTy) ||
168 (Index + NumSubElts) <=
169 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
170 "SK_InsertSubvector index out of range");
171
172 InstructionCost Cost = 0;
173 // Subvector insertion cost is equal to the cost of extracting elements
174 // from the subvector type plus the cost of inserting them into the result
175 // vector type.
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
178 CostKind, i, nullptr, nullptr);
179 Cost +=
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
182 }
183 return Cost;
184 }
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetSubtargetInfo *getST() const {
188 return static_cast<const T *>(this)->getST();
189 }
190
191 /// Local query method delegates up to T which *must* implement this!
192 const TargetLoweringBase *getTLI() const {
193 return static_cast<const T *>(this)->getTLI();
194 }
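// A minimal sketch of the subclass contract described in the class comment:
// the target names below are illustrative only (not part of LLVM), but
// concrete targets such as X86TTIImpl follow this same CRTP pattern, storing
// the subtarget and lowering objects themselves and exposing them via getST()
// and getTLI():
//
//   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
//     const MySubtarget *ST;
//     const MyTargetLowering *TLI;
//
//   public:
//     MyTTIImpl(const MyTargetMachine *TM, const Function &F)
//         : BasicTTIImplBase(TM, F.getDataLayout()),
//           ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
//
//     const MySubtarget *getST() const { return ST; }
//     const MyTargetLowering *getTLI() const { return TLI; }
//   };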
195
196 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
197 switch (M) {
198 case TTI::MIM_Unindexed:
199 return ISD::UNINDEXED;
200 case TTI::MIM_PreInc:
201 return ISD::PRE_INC;
202 case TTI::MIM_PreDec:
203 return ISD::PRE_DEC;
204 case TTI::MIM_PostInc:
205 return ISD::POST_INC;
206 case TTI::MIM_PostDec:
207 return ISD::POST_DEC;
208 }
209 llvm_unreachable("Unexpected MemIndexedMode");
210 }
211
212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
213 Align Alignment,
214 bool VariableMask,
215 bool IsGatherScatter,
216 TTI::TargetCostKind CostKind,
217 unsigned AddressSpace = 0) const {
218 // We cannot scalarize scalable vectors, so return Invalid.
219 if (isa<ScalableVectorType>(DataTy))
220 return InstructionCost::getInvalid();
221
222 auto *VT = cast<FixedVectorType>(DataTy);
223 unsigned VF = VT->getNumElements();
224
225 // Assume the target does not have support for gather/scatter operations
226 // and provide a rough estimate.
227 //
228 // First, compute the cost of the individual memory operations.
229 InstructionCost AddrExtractCost =
230 IsGatherScatter ? getScalarizationOverhead(
231 FixedVectorType::get(
232 PointerType::get(VT->getContext(), 0), VF),
233 /*Insert=*/false, /*Extract=*/true, CostKind)
234 : 0;
235
236 // The cost of the scalar loads/stores.
237 InstructionCost MemoryOpCost =
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
239 AddressSpace, CostKind);
240
241 // Next, compute the cost of packing the result in a vector.
242 InstructionCost PackingCost =
243 getScalarizationOverhead(VT, Opcode != Instruction::Store,
244 Opcode == Instruction::Store, CostKind);
245
246 InstructionCost ConditionalCost = 0;
247 if (VariableMask) {
248 // Compute the cost of conditionally executing the memory operations with
249 // variable masks. This includes extracting the individual conditions,
250 // the branches and the PHIs to combine the results.
251 // NOTE: Estimating the cost of conditionally executing the memory
252 // operations accurately is quite difficult and the current solution
253 // provides a very rough estimate only.
254 ConditionalCost =
255 getScalarizationOverhead(
256 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()), VF),
257 /*Insert=*/false, /*Extract=*/true, CostKind) +
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
260 }
261
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
263 }
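// For illustration (taking every per-element cost as 1, which is an
// assumption, not a target guarantee): a gather of <4 x float> with a
// variable mask decomposes into 4 address extracts + 4 scalar loads +
// 4 result inserts, plus the conditional overhead of 4 mask extracts,
// 4 branches and 4 PHIs, i.e. roughly 4 + 4 + 4 + 12 = 24.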
264
265 /// Checks if the provided mask \p Mask is a splat mask, i.e. it contains
266 /// only -1 or one repeated non -1 index value, and that index value occurs
267 /// at least twice. So, mask <0, -1, -1, -1> is not considered splat (it is
268 /// just an identity), same for <-1, 0, -1, -1> (just a slide), while
269 /// <2, -1, 2, -1> is a splat with \p Index=2.
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
271 // Check that the broadcast index occurs at least twice.
272 bool IsCompared = false;
273 if (int SplatIdx = PoisonMaskElem;
274 all_of(enumerate(Mask), [&](const auto &P) {
275 if (P.value() == PoisonMaskElem)
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
278 return false;
279 if (SplatIdx == PoisonMaskElem) {
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
282 }
283 IsCompared = true;
284 return SplatIdx == P.value();
285 })) {
286 Index = SplatIdx;
287 return true;
288 }
289 return false;
290 }
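// Worked examples for isSplatMask with NumSrcElts = 4:
//   <2, -1, 2, -1>  -> splat, Index = 2 (index 2 occurs twice)
//   <1, 1, 1, 1>    -> splat, Index = 1
//   <0, -1, -1, -1> -> not a splat (index 0 occurs only once; identity)
//   <-1, 0, -1, -1> -> not a splat (just a slide)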
291
292 /// Several intrinsics that return structs (including llvm.sincos[pi] and
293 /// llvm.modf) can be lowered to a vector library call (for certain VFs). The
294 /// vector library functions correspond to the scalar calls (e.g. sincos or
295 /// modf), which unlike the intrinsic return values via output pointers. This
296 /// helper checks if a vector call exists for the given intrinsic, and returns
297 /// the cost, which includes the cost of the mask (if required), and the loads
298 /// for values returned via output pointers. \p LC is the scalar libcall and
299 /// \p CallRetElementIndex (optional) is the struct element which is mapped to
300 /// the call return value. If std::nullopt is returned, then no vector library
301 /// call is available, so the intrinsic should be assigned the default cost
302 /// (e.g. scalarization).
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
304 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
305 std::optional<unsigned> CallRetElementIndex = {}) const {
306 Type *RetTy = ICA.getReturnType();
307 // Vector variants of the intrinsic can be mapped to a vector library call.
308 if (!isa<StructType>(RetTy) ||
309 !isVectorizedStructTy(cast<StructType>(RetTy)))
310 return std::nullopt;
311
312 Type *Ty = getContainedTypes(RetTy).front();
313 EVT VT = getTLI()->getValueType(DL, Ty);
314
315 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
316
317 switch (ICA.getID()) {
318 case Intrinsic::modf:
319 LC = RTLIB::getMODF(VT);
320 break;
321 case Intrinsic::sincospi:
322 LC = RTLIB::getSINCOSPI(VT);
323 break;
324 case Intrinsic::sincos:
325 LC = RTLIB::getSINCOS(VT);
326 break;
327 default:
328 return std::nullopt;
329 }
330
331 // Find associated libcall.
332 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
333 if (LibcallImpl == RTLIB::Unsupported)
334 return std::nullopt;
335
336 LLVMContext &Ctx = RetTy->getContext();
337
338 // Cost the call + mask.
339 auto Cost =
340 thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
341
344 auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
345 Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
346 VecTy, {}, CostKind, 0, nullptr, {});
347 }
348
349 // Lowering to a library call (with output pointers) may require us to emit
350 // reloads for the results.
351 for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
352 if (Idx == CallRetElementIndex)
353 continue;
354 Cost += thisT()->getMemoryOpCost(
355 Instruction::Load, VectorTy,
356 thisT()->getDataLayout().getABITypeAlign(VectorTy), 0, CostKind);
357 }
358 return Cost;
359 }
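// Example of the intended costing, assuming (for illustration only) a target
// that maps llvm.sincos.v4f32 to a vector sincosf variant: the returned cost
// is the vector call itself, plus (for masked variants) the broadcast of an
// all-true <4 x i1> mask, plus one vector reload per struct element that the
// library call returns via an output pointer rather than its return value.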
360
361 /// Filter out constant and duplicated entries in \p Ops and return a vector
362 /// containing the types from \p Tys corresponding to the remaining operands.
364 filterConstantAndDuplicatedOperands(ArrayRef<const Value *> Ops,
365 ArrayRef<Type *> Tys) {
366 SmallPtrSet<const Value *, 4> UniqueOperands;
367 SmallVector<Type *, 4> FilteredTys;
368 for (const auto &[Op, Ty] : zip_equal(Ops, Tys)) {
369 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
370 continue;
371 FilteredTys.push_back(Ty);
372 }
373 return FilteredTys;
374 }
375
376protected:
377 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
378 : BaseT(DL) {}
379 ~BasicTTIImplBase() override = default;
380
382 using TargetTransformInfoImplBase::DL;
383
384public:
385 /// \name Scalar TTI Implementations
386 /// @{
387 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
388 unsigned AddressSpace, Align Alignment,
389 unsigned *Fast) const override {
390 EVT E = EVT::getIntegerVT(Context, BitWidth);
391 return getTLI()->allowsMisalignedMemoryAccesses(
392 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
393 }
394
395 bool areInlineCompatible(const Function *Caller,
396 const Function *Callee) const override {
397 const TargetMachine &TM = getTLI()->getTargetMachine();
398
399 const FeatureBitset &CallerBits =
400 TM.getSubtargetImpl(*Caller)->getFeatureBits();
401 const FeatureBitset &CalleeBits =
402 TM.getSubtargetImpl(*Callee)->getFeatureBits();
403
404 // Inline a callee if its target-features are a subset of the caller's
405 // target-features.
406 return (CallerBits & CalleeBits) == CalleeBits;
407 }
408
409 bool hasBranchDivergence(const Function *F = nullptr) const override {
410 return false;
411 }
412
413 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
414 return false;
415 }
416
417 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
418 return true;
419 }
420
421 unsigned getFlatAddressSpace() const override {
422 // Return an invalid address space.
423 return -1;
424 }
425
426 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
427 Intrinsic::ID IID) const override {
428 return false;
429 }
430
431 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
432 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
433 }
434
435 unsigned getAssumedAddrSpace(const Value *V) const override {
436 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
437 }
438
439 bool isSingleThreaded() const override {
440 return getTLI()->getTargetMachine().Options.ThreadModel ==
441 ThreadModel::Single;
442 }
443
444 std::pair<const Value *, unsigned>
445 getPredicatedAddrSpace(const Value *V) const override {
446 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
447 }
448
449 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
450 Value *NewV) const override {
451 return nullptr;
452 }
453
454 bool isLegalAddImmediate(int64_t imm) const override {
455 return getTLI()->isLegalAddImmediate(imm);
456 }
457
458 bool isLegalAddScalableImmediate(int64_t Imm) const override {
459 return getTLI()->isLegalAddScalableImmediate(Imm);
460 }
461
462 bool isLegalICmpImmediate(int64_t imm) const override {
463 return getTLI()->isLegalICmpImmediate(imm);
464 }
465
466 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
467 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
468 Instruction *I = nullptr,
469 int64_t ScalableOffset = 0) const override {
470 TargetLoweringBase::AddrMode AM;
471 AM.BaseGV = BaseGV;
472 AM.BaseOffs = BaseOffset;
473 AM.HasBaseReg = HasBaseReg;
474 AM.Scale = Scale;
475 AM.ScalableOffset = ScalableOffset;
476 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
477 }
478
479 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
480 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
481 }
482
483 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
484 Type *ScalarValTy) const override {
485 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
486 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
487 EVT VT = getTLI()->getValueType(DL, SrcTy);
488 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
489 getTLI()->isOperationCustom(ISD::STORE, VT))
490 return true;
491
492 EVT ValVT =
493 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
494 EVT LegalizedVT =
495 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
496 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
497 };
498 while (VF > 2 && IsSupportedByTarget(VF))
499 VF /= 2;
500 return VF;
501 }
502
503 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
504 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
505 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
506 }
507
508 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
509 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
510 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
511 }
512
513 bool isLSRCostLess(const TTI::LSRCost &C1,
514 const TTI::LSRCost &C2) const override {
515 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
516 }
517
521
525
529
530 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
531 StackOffset BaseOffset, bool HasBaseReg,
532 int64_t Scale,
533 unsigned AddrSpace) const override {
535 AM.BaseGV = BaseGV;
536 AM.BaseOffs = BaseOffset.getFixed();
537 AM.HasBaseReg = HasBaseReg;
538 AM.Scale = Scale;
539 AM.ScalableOffset = BaseOffset.getScalable();
540 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
541 return 0;
542 return InstructionCost::getInvalid();
543 }
544
545 bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
546 return getTLI()->isTruncateFree(Ty1, Ty2);
547 }
548
549 bool isProfitableToHoist(Instruction *I) const override {
550 return getTLI()->isProfitableToHoist(I);
551 }
552
553 bool useAA() const override { return getST()->useAA(); }
554
555 bool isTypeLegal(Type *Ty) const override {
556 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
557 return getTLI()->isTypeLegal(VT);
558 }
559
560 unsigned getRegUsageForType(Type *Ty) const override {
561 EVT ETy = getTLI()->getValueType(DL, Ty);
562 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
563 }
564
565 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
566 ArrayRef<const Value *> Operands, Type *AccessType,
567 TTI::TargetCostKind CostKind) const override {
568 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
569 }
570
571 unsigned getEstimatedNumberOfCaseClusters(
572 const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI,
573 BlockFrequencyInfo *BFI) const override {
574 /// Try to find the estimated number of clusters. Note that the number of
575 /// clusters identified in this function could be different from the actual
576 /// numbers found in lowering. This function ignores switches that are
577 /// lowered with a mix of jump table / bit test / BTree. This function was
578 /// initially intended to be used when estimating the cost of a switch in
579 /// the inline cost heuristic, but it's a generic cost model to be used in
580 /// other places (e.g., in loop unrolling).
581 unsigned N = SI.getNumCases();
582 const TargetLoweringBase *TLI = getTLI();
583 const DataLayout &DL = this->getDataLayout();
584
585 JumpTableSize = 0;
586 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
587
588 // Early exit if both a jump table and bit test are not allowed.
589 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
590 return N;
591
592 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
593 APInt MinCaseVal = MaxCaseVal;
594 for (auto CI : SI.cases()) {
595 const APInt &CaseVal = CI.getCaseValue()->getValue();
596 if (CaseVal.sgt(MaxCaseVal))
597 MaxCaseVal = CaseVal;
598 if (CaseVal.slt(MinCaseVal))
599 MinCaseVal = CaseVal;
600 }
601
602 // Check if suitable for a bit test
603 if (N <= DL.getIndexSizeInBits(0u)) {
604 SmallDenseMap<const BasicBlock *, unsigned> DestMap;
605 for (auto I : SI.cases()) {
606 const BasicBlock *BB = I.getCaseSuccessor();
607 ++DestMap[BB];
608 }
609
610 if (TLI->isSuitableForBitTests(DestMap, MinCaseVal, MaxCaseVal, DL))
611 return 1;
612 }
613
614 // Check if suitable for a jump table.
615 if (IsJTAllowed) {
616 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
617 return N;
618 uint64_t Range =
619 (MaxCaseVal - MinCaseVal)
620 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
621 // Check whether a range of clusters is dense enough for a jump table
622 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
623 JumpTableSize = Range;
624 return 1;
625 }
626 }
627 return N;
628 }
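// For example, a switch with cases {0, 1, ..., 99} (dense over a range of
// 100) typically satisfies isSuitableForJumpTable, so this returns 1 cluster
// with JumpTableSize = 100; the same 100 cases spread sparsely over a 2^40
// range fail both the bit-test and jump-table checks, and the function
// returns N = 100.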
629
630 bool shouldBuildLookupTables() const override {
631 const TargetLoweringBase *TLI = getTLI();
632 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
633 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
634 }
635
636 bool shouldBuildRelLookupTables() const override {
637 const TargetMachine &TM = getTLI()->getTargetMachine();
638 // If non-PIC mode, do not generate a relative lookup table.
639 if (!TM.isPositionIndependent())
640 return false;
641
642 /// Relative lookup table entries consist of 32-bit offsets.
643 /// Do not generate relative lookup tables for large code models
644 /// in 64-bit architectures where 32-bit offsets might not be enough.
645 if (TM.getCodeModel() == CodeModel::Medium ||
646 TM.getCodeModel() == CodeModel::Large)
647 return false;
648
649 const Triple &TargetTriple = TM.getTargetTriple();
650 if (!TargetTriple.isArch64Bit())
651 return false;
652
653 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
654 // there.
655 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
656 return false;
657
658 return true;
659 }
660
661 bool haveFastSqrt(Type *Ty) const override {
662 const TargetLoweringBase *TLI = getTLI();
663 EVT VT = TLI->getValueType(DL, Ty);
664 return TLI->isTypeLegal(VT) &&
665 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
666 }
667
668 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override { return true; }
669
670 InstructionCost getFPOpCost(Type *Ty) const override {
671 // Check whether FADD is available, as a proxy for floating-point in
672 // general.
673 const TargetLoweringBase *TLI = getTLI();
674 EVT VT = TLI->getValueType(DL, Ty);
675 return TLI->isOperationLegalOrCustom(ISD::FADD, VT)
676 ? TargetTransformInfo::TCC_Basic
677 : TargetTransformInfo::TCC_Expensive;
678 }
679
680 bool preferToKeepConstantsAttached(const Instruction &Inst,
681 const Function &Fn) const override {
682 switch (Inst.getOpcode()) {
683 default:
684 break;
685 case Instruction::SDiv:
686 case Instruction::SRem:
687 case Instruction::UDiv:
688 case Instruction::URem: {
689 if (!isa<ConstantInt>(Inst.getOperand(1)))
690 return false;
691 EVT VT = getTLI()->getValueType(DL, Inst.getType());
692 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
693 }
694 };
695
696 return false;
697 }
698
699 unsigned getInliningThresholdMultiplier() const override { return 1; }
700 unsigned adjustInliningThreshold(const CallBase *CB) const override {
701 return 0;
702 }
703 unsigned getCallerAllocaCost(const CallBase *CB,
704 const AllocaInst *AI) const override {
705 return 0;
706 }
707
708 int getInlinerVectorBonusPercent() const override { return 150; }
709
710 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
711 TTI::UnrollingPreferences &UP,
712 OptimizationRemarkEmitter *ORE) const override {
713 // This unrolling functionality is target independent, but to provide some
714 // motivation for its intended use, for x86:
715
716 // According to the Intel 64 and IA-32 Architectures Optimization Reference
717 // Manual, Intel Core models and later have a loop stream detector (and
718 // associated uop queue) that can benefit from partial unrolling.
719 // The relevant requirements are:
720 // - The loop must have no more than 4 (8 for Nehalem and later) branches
721 // taken, and none of them may be calls.
722 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
723
724 // According to the Software Optimization Guide for AMD Family 15h
725 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
726 // and loop buffer which can benefit from partial unrolling.
727 // The relevant requirements are:
728 // - The loop must have fewer than 16 branches
729 // - The loop must have less than 40 uops in all executed loop branches
730
731 // The number of taken branches in a loop is hard to estimate here, and
732 // benchmarking has revealed that it is better not to be conservative when
733 // estimating the branch count. As a result, we'll ignore the branch limits
734 // until someone finds a case where it matters in practice.
735
736 unsigned MaxOps;
737 const TargetSubtargetInfo *ST = getST();
738 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
739 MaxOps = PartialUnrollingThreshold;
740 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
741 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
742 else
743 return;
744
745 // Scan the loop: don't unroll loops with calls.
746 for (BasicBlock *BB : L->blocks()) {
747 for (Instruction &I : *BB) {
748 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
749 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
750 if (!thisT()->isLoweredToCall(F))
751 continue;
752 }
753
754 if (ORE) {
755 ORE->emit([&]() {
756 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
757 L->getHeader())
758 << "advising against unrolling the loop because it "
759 "contains a "
760 << ore::NV("Call", &I);
761 });
762 }
763 return;
764 }
765 }
766 }
767
768 // Enable runtime and partial unrolling up to the specified size.
769 // Enable using trip count upper bound to unroll loops.
770 UP.Partial = UP.Runtime = UP.UpperBound = true;
771 UP.PartialThreshold = MaxOps;
772
773 // Avoid unrolling when optimizing for size.
774 UP.OptSizeThreshold = 0;
775 UP.PartialOptSizeThreshold = 0;
776
777 // Set number of instructions optimized when "back edge"
778 // becomes "fall through" to default value of 2.
779 UP.BEInsns = 2;
780 }
781
782 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
783 TTI::PeelingPreferences &PP) const override {
784 PP.PeelCount = 0;
785 PP.AllowPeeling = true;
786 PP.AllowLoopNestsPeeling = false;
787 PP.PeelProfiledIterations = true;
788 }
789
790 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
791 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
792 HardwareLoopInfo &HWLoopInfo) const override {
793 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
794 }
795
796 unsigned getEpilogueVectorizationMinVF() const override {
797 return BaseT::getEpilogueVectorizationMinVF();
798 }
799
800 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
801 return BaseT::preferPredicateOverEpilogue(TFI);
802 }
803
807
808 std::optional<Instruction *>
809 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
810 return BaseT::instCombineIntrinsic(IC, II);
811 }
812
813 std::optional<Value *>
814 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
815 APInt DemandedMask, KnownBits &Known,
816 bool &KnownBitsComputed) const override {
817 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
818 KnownBitsComputed);
819 }
820
821 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
822 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
823 APInt &UndefElts2, APInt &UndefElts3,
824 std::function<void(Instruction *, unsigned, APInt, APInt &)>
825 SimplifyAndSetOp) const override {
826 return BaseT::simplifyDemandedVectorEltsIntrinsic(
827 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
828 SimplifyAndSetOp);
829 }
830
831 std::optional<unsigned>
832 getCacheSize(TargetTransformInfo::CacheLevel Level) const override {
833 return std::optional<unsigned>(
834 getST()->getCacheSize(static_cast<unsigned>(Level)));
835 }
836
837 std::optional<unsigned>
838 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override {
839 std::optional<unsigned> TargetResult =
840 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
841
842 if (TargetResult)
843 return TargetResult;
844
845 return BaseT::getCacheAssociativity(Level);
846 }
847
848 unsigned getCacheLineSize() const override {
849 return getST()->getCacheLineSize();
850 }
851
852 unsigned getPrefetchDistance() const override {
853 return getST()->getPrefetchDistance();
854 }
855
856 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
857 unsigned NumStridedMemAccesses,
858 unsigned NumPrefetches,
859 bool HasCall) const override {
860 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
861 NumPrefetches, HasCall);
862 }
863
864 unsigned getMaxPrefetchIterationsAhead() const override {
865 return getST()->getMaxPrefetchIterationsAhead();
866 }
867
868 bool enableWritePrefetching() const override {
869 return getST()->enableWritePrefetching();
870 }
871
872 bool shouldPrefetchAddressSpace(unsigned AS) const override {
873 return getST()->shouldPrefetchAddressSpace(AS);
874 }
875
876 /// @}
877
878 /// \name Vector TTI Implementations
879 /// @{
880
881 TypeSize
882 getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override {
883 return TypeSize::getFixed(32);
884 }
885
886 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
887 std::optional<unsigned> getVScaleForTuning() const override {
888 return std::nullopt;
889 }
890
891 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
892 /// are set if the demanded result elements need to be inserted and/or
893 /// extracted from vectors.
894 InstructionCost
895 getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts,
896 bool Insert, bool Extract,
897 TTI::TargetCostKind CostKind,
898 bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
899 TTI::VectorInstrContext VIC =
900 TTI::VectorInstrContext::None) const override {
901 /// FIXME: a bitfield is not a reasonable abstraction for talking about
902 /// which elements are needed from a scalable vector
903 if (isa<ScalableVectorType>(InTy))
904 return InstructionCost::getInvalid();
905 auto *Ty = cast<FixedVectorType>(InTy);
906
907 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
908 (VL.empty() || VL.size() == Ty->getNumElements()) &&
909 "Vector size mismatch");
910
911 InstructionCost Cost = 0;
912
913 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
914 if (!DemandedElts[i])
915 continue;
916 if (Insert) {
917 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
918 Cost +=
919 thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
920 CostKind, i, nullptr, InsertedVal, VIC);
921 }
922 if (Extract)
923 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
924 CostKind, i, nullptr, nullptr, VIC);
925 }
926
927 return Cost;
928 }
929
930 bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override {
931 return false;
932 }
933
934 bool
935 isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
936 unsigned ScalarOpdIdx) const override {
937 return false;
938 }
939
940 bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
941 int OpdIdx) const override {
942 return OpdIdx == -1;
943 }
944
945 bool
946 isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
947 int RetIdx) const override {
948 return RetIdx == 0;
949 }
950
951 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
952 InstructionCost getScalarizationOverhead(
953 VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind,
954 bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
955 TTI::VectorInstrContext VIC = TTI::VectorInstrContext::None) const {
956 if (isa<ScalableVectorType>(InTy))
957 return InstructionCost::getInvalid();
958 auto *Ty = cast<FixedVectorType>(InTy);
959
960 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
961 // Use CRTP to allow target overrides
962 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
963 CostKind, ForPoisonSrc, VL, VIC);
964 }
965
966 /// Estimate the overhead of scalarizing an instruction's
967 /// operands. The (potentially vector) types to use for each
968 /// argument are passed via Tys.
969 InstructionCost getOperandsScalarizationOverhead(
970 ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind,
971 TTI::VectorInstrContext VIC =
972 TTI::VectorInstrContext::None) const override {
973 InstructionCost Cost = 0;
974 for (Type *Ty : Tys) {
975 // Disregard things like metadata arguments.
976 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
977 !Ty->isPtrOrPtrVectorTy())
978 continue;
979
980 if (auto *VecTy = dyn_cast<VectorType>(Ty))
981 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
982 /*Extract*/ true, CostKind,
983 /*ForPoisonSrc=*/true, {}, VIC);
984 }
985
986 return Cost;
987 }
988
989 /// Estimate the overhead of scalarizing the inputs and outputs of an
990 /// instruction, with return type RetTy and arguments Args of type Tys. If
991 /// Args are unknown (empty), then the cost associated with one argument is
992 /// added as a heuristic.
993 InstructionCost getScalarizationOverhead(VectorType *RetTy,
994 ArrayRef<const Value *> Args,
995 ArrayRef<Type *> Tys,
996 TTI::TargetCostKind CostKind) const {
997 InstructionCost Cost = getScalarizationOverhead(
998 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
999 if (!Args.empty())
1000 Cost += getOperandsScalarizationOverhead(
1001 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
1002 else
1003 // When no information on arguments is provided, we add the cost
1004 // associated with one argument as a heuristic.
1005 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
1006 /*Extract*/ true, CostKind);
1007
1008 return Cost;
1009 }
1010
1011 /// Estimate the cost of type-legalization and the legalized type.
1012 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
1013 LLVMContext &C = Ty->getContext();
1014 EVT MTy = getTLI()->getValueType(DL, Ty);
1015
1017 // We keep legalizing the type until we find a legal kind. We assume that
1018 // the only operation that costs anything is the split. After splitting
1019 // we need to handle two types.
1020 while (true) {
1021 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
1022
1023 if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
1024 // Ensure we return a sensible simple VT here, since many callers of
1025 // this function require it.
1026 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
1027 return std::make_pair(InstructionCost::getInvalid(), VT);
1028 }
1029
1030 if (LK.first == TargetLoweringBase::TypeLegal)
1031 return std::make_pair(Cost, MTy.getSimpleVT());
1032
1033 if (LK.first == TargetLoweringBase::TypeSplitVector ||
1034 LK.first == TargetLoweringBase::TypeExpandInteger)
1035 Cost *= 2;
1036
1037 // Do not loop with f128 type.
1038 if (MTy == LK.second)
1039 return std::make_pair(Cost, MTy.getSimpleVT());
1040
1041 // Keep legalizing the type.
1042 MTy = LK.second;
1043 }
1044 }
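// For illustration, on a hypothetical target whose widest legal vector is
// 128 bits: legalizing <8 x i64> (512 bits) splits v8i64 -> 2 x v4i64 ->
// 4 x v2i64, doubling Cost at each step, so this returns {4, MVT::v2i64}.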
1045
1046 unsigned getMaxInterleaveFactor(ElementCount VF) const override { return 1; }
1047
1048 InstructionCost getArithmeticInstrCost(
1049 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1050 TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
1051 TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
1052 ArrayRef<const Value *> Args = {},
1053 const Instruction *CxtI = nullptr) const override {
1054 // Check if any of the operands are vector operands.
1055 const TargetLoweringBase *TLI = getTLI();
1056 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1057 assert(ISD && "Invalid opcode");
1058
1059 // TODO: Handle more cost kinds.
1060 if (CostKind != TTI::TCK_RecipThroughput)
1061 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1062 Opd1Info, Opd2Info,
1063 Args, CxtI);
1064
1065 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1066
1067 bool IsFloat = Ty->isFPOrFPVectorTy();
1068 // Assume that floating point arithmetic operations cost twice as much as
1069 // integer operations.
1070 InstructionCost OpCost = (IsFloat ? 2 : 1);
1071
1072 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1073 // The operation is legal. Assume it costs 1.
1074 // TODO: Once we have extract/insert subvector cost we need to use them.
1075 return LT.first * OpCost;
1076 }
1077
1078 if (!TLI->isOperationExpand(ISD, LT.second)) {
1079 // If the operation is custom lowered, then assume that the code is twice
1080 // as expensive.
1081 return LT.first * 2 * OpCost;
1082 }
1083
1084 // An 'Expand' of URem and SRem is special because it may default
1085 // to expanding the operation into a sequence of sub-operations
1086 // i.e. X % Y -> X-(X/Y)*Y.
1087 if (ISD == ISD::UREM || ISD == ISD::SREM) {
1088 bool IsSigned = ISD == ISD::SREM;
1089 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
1090 LT.second) ||
1091 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
1092 LT.second)) {
1093 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1094 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
1095 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1096 InstructionCost MulCost =
1097 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1098 InstructionCost SubCost =
1099 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1100 return DivCost + MulCost + SubCost;
1101 }
1102 }
1103
1104 // We cannot scalarize scalable vectors, so return Invalid.
1105 if (isa<ScalableVectorType>(Ty))
1106 return InstructionCost::getInvalid();
1107
1108 // Else, assume that we need to scalarize this op.
1109 // TODO: If one of the types get legalized by splitting, handle this
1110 // similarly to what getCastInstrCost() does.
1111 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1112 InstructionCost Cost = thisT()->getArithmeticInstrCost(
1113 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
1114 Args, CxtI);
1115 // Return the cost of multiple scalar invocations plus the cost of
1116 // inserting and extracting the values.
1117 SmallVector<Type *> Tys(Args.size(), Ty);
1118 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1119 VTy->getNumElements() * Cost;
1120 }
1121
1122 // We don't know anything about this scalar instruction.
1123 return OpCost;
1124 }
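// For example, if SREM on the legalized type must be expanded but SDIV is
// legal, the block above costs an i32 'srem' as sdiv + mul + sub
// (X % Y == X - (X / Y) * Y), i.e. roughly 3 when each of those ops costs 1;
// the exact value depends on the target's hooks.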
1125
1126 TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
1127 ArrayRef<int> Mask,
1128 VectorType *SrcTy, int &Index,
1129 VectorType *&SubTy) const {
1130 if (Mask.empty())
1131 return Kind;
1132 int NumDstElts = Mask.size();
1133 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1134 switch (Kind) {
1135 case TTI::SK_PermuteSingleSrc: {
1136 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
1137 return TTI::SK_Reverse;
1138 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
1139 return TTI::SK_Broadcast;
1140 if (isSplatMask(Mask, NumSrcElts, Index))
1141 return TTI::SK_Broadcast;
1142 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
1143 (Index + NumDstElts) <= NumSrcElts) {
1144 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
1145 return TTI::SK_ExtractSubvector;
1146 }
1147 break;
1148 }
1149 case TTI::SK_PermuteTwoSrc: {
1150 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1151 return improveShuffleKindFromMask(TTI::SK_PermuteSingleSrc, Mask, SrcTy,
1152 Index, SubTy);
1153 int NumSubElts;
1154 if (NumDstElts > 2 && ShuffleVectorInst::isInsertSubvectorMask(
1155 Mask, NumSrcElts, NumSubElts, Index)) {
1156 if (Index + NumSubElts > NumSrcElts)
1157 return Kind;
1158 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
1159 return TTI::SK_InsertSubvector;
1160 }
1161 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
1162 return TTI::SK_Select;
1163 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1164 return TTI::SK_Transpose;
1165 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1166 return TTI::SK_Splice;
1167 break;
1168 }
1169 case TTI::SK_Select:
1170 case TTI::SK_Reverse:
1171 case TTI::SK_Broadcast:
1172 case TTI::SK_Transpose:
1173 case TTI::SK_InsertSubvector:
1174 case TTI::SK_ExtractSubvector:
1175 case TTI::SK_Splice:
1176 break;
1177 }
1178 return Kind;
1179 }
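// Worked examples with SrcTy = <4 x i32> (NumSrcElts = 4):
//   <3, 2, 1, 0>  SK_PermuteSingleSrc -> SK_Reverse
//   <0, 0, 0, 0>  SK_PermuteSingleSrc -> SK_Broadcast
//   <2, 3>        SK_PermuteSingleSrc -> SK_ExtractSubvector (Index = 2)
//   <0, 4, 2, 6>  SK_PermuteTwoSrc    -> SK_Transpose
//   <0, 5, 2, 7>  SK_PermuteTwoSrc    -> SK_Select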
1180
1181 InstructionCost
1182 getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
1183 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
1184 VectorType *SubTp, ArrayRef<const Value *> Args = {},
1185 const Instruction *CxtI = nullptr) const override {
1186 switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
1187 case TTI::SK_Broadcast:
1188 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1189 return getBroadcastShuffleOverhead(FVT, CostKind);
1190 return InstructionCost::getInvalid();
1191 case TTI::SK_Select:
1192 case TTI::SK_Splice:
1193 case TTI::SK_Reverse:
1194 case TTI::SK_Transpose:
1195 case TTI::SK_PermuteSingleSrc:
1196 case TTI::SK_PermuteTwoSrc:
1197 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1198 return getPermuteShuffleOverhead(FVT, CostKind);
1199 return InstructionCost::getInvalid();
1200 case TTI::SK_ExtractSubvector:
1201 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1202 cast<FixedVectorType>(SubTp));
1203 case TTI::SK_InsertSubvector:
1204 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1205 cast<FixedVectorType>(SubTp));
1206 }
1207 llvm_unreachable("Unknown TTI::ShuffleKind");
1208 }
1209
1210 InstructionCost
1211 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1212 TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
1213 const Instruction *I = nullptr) const override {
1214 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1215 return 0;
1216
1217 const TargetLoweringBase *TLI = getTLI();
1218 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1219 assert(ISD && "Invalid opcode");
1220 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1221 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1222
1223 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1224 TypeSize DstSize = DstLT.second.getSizeInBits();
1225 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1226 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1227
1228 switch (Opcode) {
1229 default:
1230 break;
1231 case Instruction::Trunc:
1232 // Check for NOOP conversions.
1233 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1234 return 0;
1235 [[fallthrough]];
1236 case Instruction::BitCast:
1237 // Bitcast between types that are legalized to the same type are free and
1238 // assume int to/from ptr of the same size is also free.
1239 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1240 SrcSize == DstSize)
1241 return 0;
1242 break;
1243 case Instruction::FPExt:
1244 if (I && getTLI()->isExtFree(I))
1245 return 0;
1246 break;
1247 case Instruction::ZExt:
1248 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1249 return 0;
1250 [[fallthrough]];
1251 case Instruction::SExt:
1252 if (I && getTLI()->isExtFree(I))
1253 return 0;
1254
1255 // If this is a zext/sext of a load, return 0 if the corresponding
1256 // extending load exists on target and the result type is legal.
1257 if (CCH == TTI::CastContextHint::Normal) {
1258 EVT ExtVT = EVT::getEVT(Dst);
1259 EVT LoadVT = EVT::getEVT(Src);
1260 unsigned LType =
1261 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1262 if (DstLT.first == SrcLT.first &&
1263 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1264 return 0;
1265 }
1266 break;
1267 case Instruction::AddrSpaceCast:
1268 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1269 Dst->getPointerAddressSpace()))
1270 return 0;
1271 break;
1272 }
1273
1274 auto *SrcVTy = dyn_cast<VectorType>(Src);
1275 auto *DstVTy = dyn_cast<VectorType>(Dst);
1276
1277 // If the cast is marked as legal (or promote) then assume low cost.
1278 if (SrcLT.first == DstLT.first &&
1279 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1280 return SrcLT.first;
1281
1282 // Handle scalar conversions.
1283 if (!SrcVTy && !DstVTy) {
1284 // Just check the op cost. If the operation is legal then assume it costs
1285 // 1.
1286 if (!TLI->isOperationExpand(ISD, DstLT.second))
1287 return 1;
1288
1289 // Assume that illegal scalar instruction are expensive.
1290 return 4;
1291 }
1292
1293 // Check vector-to-vector casts.
1294 if (DstVTy && SrcVTy) {
1295 // If the cast is between same-sized registers, then the check is simple.
1296 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1297
1298 // Assume that Zext is done using AND.
1299 if (Opcode == Instruction::ZExt)
1300 return SrcLT.first;
1301
1302 // Assume that sext is done using SHL and SRA.
1303 if (Opcode == Instruction::SExt)
1304 return SrcLT.first * 2;
1305
1306 // Just check the op cost. If the operation is legal then
1307 // assume it costs 1 and multiply by the type-legalization
1308 // overhead.
1309 if (!TLI->isOperationExpand(ISD, DstLT.second))
1310 return SrcLT.first * 1;
1311 }
1312
1313 // If we are legalizing by splitting, query the concrete TTI for the cost
1314 // of casting the original vector twice. We also need to factor in the
1315 // cost of the split itself. Count that as 1, to be consistent with
1316 // getTypeLegalizationCost().
1317 bool SplitSrc =
1318 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1319 TargetLowering::TypeSplitVector;
1320 bool SplitDst =
1321 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1322 TargetLowering::TypeSplitVector;
1323 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
1324 DstVTy->getElementCount().isKnownEven()) {
1325 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1326 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1327 const T *TTI = thisT();
1328 // If both types need to be split then the split is free.
1329 InstructionCost SplitCost =
1330 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1331 return SplitCost +
1332 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1333 CostKind, I));
1334 }
1335
1336 // Scalarization cost is Invalid, can't assume any num elements.
1337 if (isa<ScalableVectorType>(DstVTy))
1338 return InstructionCost::getInvalid();
1339
1340 // In other cases where the source or destination are illegal, assume
1341 // the operation will get scalarized.
1342 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1343 InstructionCost Cost = thisT()->getCastInstrCost(
1344 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1345
1346 // Return the cost of multiple scalar invocations plus the cost of
1347 // inserting and extracting the values.
1348 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1349 CostKind) +
1350 Num * Cost;
1351 }
1352
1353 // We already handled vector-to-vector and scalar-to-scalar conversions.
1354 // This is where we handle bitcasts between vectors and scalars.
1355 // We need to assume that the conversion is scalarized in one way
1356 // or another.
1357 if (Opcode == Instruction::BitCast) {
1358 // Illegal bitcasts are done by storing and loading from a stack slot.
1359 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1360 /*Extract*/ true, CostKind)
1361 : 0) +
1362 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1363 /*Extract*/ false, CostKind)
1364 : 0);
1365 }
1366
1367 llvm_unreachable("Unhandled cast");
1368 }
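// For example, on a target where <4 x i16> and <4 x i32> both legalize to
// same-sized registers (an assumption for illustration), the rules above
// cost a 'sext <4 x i16> to <4 x i32>' as 2 (assumed SHL + SRA) and the
// corresponding 'zext' as 1 (assumed AND), each multiplied by the
// type-legalization factor SrcLT.first.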
1369
1370 InstructionCost
1371 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1372 unsigned Index,
1373 TTI::TargetCostKind CostKind) const override {
1374 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1375 CostKind, Index, nullptr, nullptr) +
1376 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1377 TTI::CastContextHint::None, CostKind);
1378 }
1379
1380 InstructionCost
1381 getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
1382 const Instruction *I = nullptr) const override {
1383 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1384 }
1385
1386 InstructionCost getCmpSelInstrCost(
1387 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1388 TTI::TargetCostKind CostKind,
1389 TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
1390 TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
1391 const Instruction *I = nullptr) const override {
1392 const TargetLoweringBase *TLI = getTLI();
1393 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1394 assert(ISD && "Invalid opcode");
1395
1396 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)
1397 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1398 Op1Info, Op2Info, I);
1399
1400 // Selects on vectors are actually vector selects.
1401 if (ISD == ISD::SELECT) {
1402 assert(CondTy && "CondTy must exist");
1403 if (CondTy->isVectorTy())
1404 ISD = ISD::VSELECT;
1405 }
1406 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1407
1408 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1409 !TLI->isOperationExpand(ISD, LT.second)) {
1410 // The operation is legal. Assume it costs 1. Multiply
1411 // by the type-legalization overhead.
1412 return LT.first * 1;
1413 }
1414
1415 // Otherwise, assume that the operation is scalarized.
1416 // TODO: If one of the types get legalized by splitting, handle this
1417 // similarly to what getCastInstrCost() does.
1418 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1419 if (isa<ScalableVectorType>(ValTy))
1420 return InstructionCost::getInvalid();
1421
1422 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1423 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1424 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1425 CostKind, Op1Info, Op2Info, I);
1426
1427 // Return the cost of multiple scalar invocations plus the cost of
1428 // inserting and extracting the values.
1429 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1430 /*Extract*/ false, CostKind) +
1431 Num * Cost;
1432 }
1433
1434 // Unknown scalar opcode.
1435 return 1;
1436 }
1437
1438 InstructionCost getVectorInstrCost(
1439 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
1440 unsigned Index, const Value *Op0, const Value *Op1,
1441 TTI::VectorInstrContext VIC =
1442 TTI::VectorInstrContext::None) const override {
1443 return getRegUsageForType(Val->getScalarType());
1444 }
1445
1446 /// \param ScalarUserAndIdx encodes the information about extracts from a
1447 /// vector with 'Scalar' being the value being extracted,'User' being the user
1448 /// of the extract(nullptr if user is not known before vectorization) and
1449 /// 'Idx' being the extract lane.
1450 InstructionCost getVectorInstrCost(
1451 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1452 Value *Scalar,
1453 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
1454 TTI::VectorInstrContext VIC =
1455 TTI::VectorInstrContext::None) const override {
1456 return getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr, nullptr,
1457 VIC);
1458 }
1459
1460 InstructionCost
1461 getVectorInstrCost(const Instruction &I, Type *Val,
1462 TTI::TargetCostKind CostKind, unsigned Index,
1463 TTI::VectorInstrContext VIC =
1464 TTI::VectorInstrContext::None) const override {
1465 Value *Op0 = nullptr;
1466 Value *Op1 = nullptr;
1467 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1468 Op0 = IE->getOperand(0);
1469 Op1 = IE->getOperand(1);
1470 }
1471 // If VIC is None, compute it from the instruction
1474 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1475 Op1, VIC);
1476 }
1477
1481 unsigned Index) const override {
1482 unsigned NewIndex = -1;
1483 if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
1484 assert(Index < FVTy->getNumElements() &&
1485 "Unexpected index from end of vector");
1486 NewIndex = FVTy->getNumElements() - 1 - Index;
1487 }
1488 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1489 nullptr);
1490 }
1491
1492 InstructionCost
1493 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
1494 const APInt &DemandedDstElts,
1495 TTI::TargetCostKind CostKind) const override {
1496 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1497 "Unexpected size of DemandedDstElts.");
1498
1499 InstructionCost Cost;
1500
1501 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1502 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1503
1504 // The Mask shuffling cost is extract all the elements of the Mask
1505 // and insert each of them Factor times into the wide vector:
1506 //
1507 // E.g. an interleaved group with factor 3:
1508 // %mask = icmp ult <8 x i32> %vec1, %vec2
1509 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1510 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1511 // The cost is estimated as extract all mask elements from the <8xi1> mask
1512 // vector and insert them factor times into the <24xi1> shuffled mask
1513 // vector.
1514 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1515 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1516 /*Insert*/ false,
1517 /*Extract*/ true, CostKind);
1518 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1519 /*Insert*/ true,
1520 /*Extract*/ false, CostKind);
1521
1522 return Cost;
1523 }
1524
1525 InstructionCost getMemoryOpCost(
1526 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1527 TTI::TargetCostKind CostKind,
1528 TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
1529 const Instruction *I = nullptr) const override {
1530 assert(!Src->isVoidTy() && "Invalid type");
1531 // Assume types, such as structs, are expensive.
1532 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1533 return 4;
1534 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1535
1536 // Assuming that all loads of legal types cost 1.
1537 InstructionCost Cost = LT.first;
1538 if (CostKind != TTI::TCK_RecipThroughput)
1539 return Cost;
1540
1541 const DataLayout &DL = this->getDataLayout();
1542 if (Src->isVectorTy() &&
1543 // In practice it's not currently possible to have a change in lane
1544 // length for extending loads or truncating stores so both types should
1545 // have the same scalable property.
1546 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1547 LT.second.getSizeInBits())) {
1548 // This is a vector load that legalizes to a larger type than the vector
1549 // itself. Unless the corresponding extending load or truncating store is
1550 // legal, then this will scalarize.
1551 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
1552 EVT MemVT = getTLI()->getValueType(DL, Src);
1553 if (Opcode == Instruction::Store)
1554 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1555 else
1556 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1557
1558 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1559 // This is a vector load/store for some illegal type that is scalarized.
1560 // We must account for the cost of building or decomposing the vector.
1561 Cost += getScalarizationOverhead(
1562 cast<VectorType>(Src), Opcode != Instruction::Store,
1563 Opcode == Instruction::Store, CostKind);
1564 }
1565 }
1566
1567 return Cost;
1568 }
1569
1570 InstructionCost getInterleavedMemoryOpCost(
1571 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1572 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1573 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1574
1575 // We cannot scalarize scalable vectors, so return Invalid.
1576 if (isa<ScalableVectorType>(VecTy))
1577 return InstructionCost::getInvalid();
1578
1579 auto *VT = cast<FixedVectorType>(VecTy);
1580
1581 unsigned NumElts = VT->getNumElements();
1582 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1583
1584 unsigned NumSubElts = NumElts / Factor;
1585 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1586
1587 // First, the cost of the load/store operation.
1588 InstructionCost Cost;
1589 if (UseMaskForCond || UseMaskForGaps) {
1590 unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
1591 : Intrinsic::masked_store;
1592 Cost = thisT()->getMemIntrinsicInstrCost(
1593 MemIntrinsicCostAttributes(IID, VecTy, Alignment, AddressSpace),
1594 CostKind);
1595 } else
1596 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1597 CostKind);
1598
1599 // Legalize the vector type, and get the legalized and unlegalized type
1600 // sizes.
1601 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1602 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1603 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1604
1605 // Scale the cost of the memory operation by the fraction of legalized
1606 // instructions that will actually be used. We shouldn't account for the
1607 // cost of dead instructions since they will be removed.
1608 //
1609 // E.g., An interleaved load of factor 8:
1610 // %vec = load <16 x i64>, <16 x i64>* %ptr
1611 // %v0 = shufflevector %vec, undef, <0, 8>
1612 //
1613 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1614 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1615 // type). The other loads are unused.
1616 //
1617 // TODO: Note that legalization can turn masked loads/stores into unmasked
1618 // (legalized) loads/stores. This can be reflected in the cost.
1619 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1620 // The number of loads of a legal type it will take to represent a load
1621 // of the unlegalized vector type.
1622 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1623
1624 // The number of elements of the unlegalized type that correspond to a
1625 // single legal instruction.
1626 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1627
1628 // Determine which legal instructions will be used.
1629 BitVector UsedInsts(NumLegalInsts, false);
1630 for (unsigned Index : Indices)
1631 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1632 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1633
1634 // Scale the cost of the load by the fraction of legal instructions that
1635 // will be used.
1636 Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
1637 }
1638
1639 // Then add the cost of the interleave operation.
1640 assert(Indices.size() <= Factor &&
1641 "Interleaved memory op has too many members");
1642
1643 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1644 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1645
1646 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1647 for (unsigned Index : Indices) {
1648 assert(Index < Factor && "Invalid index for interleaved memory op");
1649 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1650 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1651 }
1652
1653 if (Opcode == Instruction::Load) {
1654 // The interleave cost is similar to extract sub vectors' elements
1655 // from the wide vector, and insert them into sub vectors.
1656 //
1657 // E.g. An interleaved load of factor 2 (with one member of index 0):
1658 // %vec = load <8 x i32>, <8 x i32>* %ptr
1659 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1660 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1661 // <8 x i32> vector and insert them into a <4 x i32> vector.
1662 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1663 SubVT, DemandedAllSubElts,
1664 /*Insert*/ true, /*Extract*/ false, CostKind);
1665 Cost += Indices.size() * InsSubCost;
1666 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1667 /*Insert*/ false,
1668 /*Extract*/ true, CostKind);
1669 } else {
1670 // The interleave cost is extract elements from sub vectors, and
1671 // insert them into the wide vector.
1672 //
1673 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1674 // (using VF=4):
1675 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1676 // %gaps.mask = <true, true, false, true, true, false,
1677 // true, true, false, true, true, false>
1678 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1679 // i32 Align, <12 x i1> %gaps.mask
1680 // The cost is estimated as extract all elements (of actual members,
1681 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1682 // i32> vector.
1683 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1684 SubVT, DemandedAllSubElts,
1685 /*Insert*/ false, /*Extract*/ true, CostKind);
1686 Cost += ExtSubCost * Indices.size();
1687 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1688 /*Insert*/ true,
1689 /*Extract*/ false, CostKind);
1690 }
1691
1692 if (!UseMaskForCond)
1693 return Cost;
1694
1695 Type *I8Type = Type::getInt8Ty(VT->getContext());
1696
1697 Cost += thisT()->getReplicationShuffleCost(
1698 I8Type, Factor, NumSubElts,
1699 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1700 CostKind);
1701
1702 // The Gaps mask is invariant and created outside the loop, therefore the
1703 // cost of creating it is not accounted for here. However if we have both
1704 // a MaskForGaps and some other mask that guards the execution of the
1705 // memory access, we need to account for the cost of And-ing the two masks
1706 // inside the loop.
1707 if (UseMaskForGaps) {
1708 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1709 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1710 CostKind);
1711 }
1712
1713 return Cost;
1714 }
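// Numeric illustration of the scaling above: for the factor-8 load of
// <16 x i64> legalized to 8 v2i64 loads with a single index requested,
// UsedInsts.count() == 2 and NumLegalInsts == 8, so a raw memory cost of 8
// is scaled to ceil(2 * 8 / 8) = 2 before the shuffle overhead is added.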
1715
1716 /// Get intrinsic cost based on arguments.
1717 InstructionCost
1718 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1719 TTI::TargetCostKind CostKind) const override {
1720 // Check for generically free intrinsics.
1721 if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
1722 return 0;
1723
1724 // Assume that target intrinsics are cheap.
1725 Intrinsic::ID IID = ICA.getID();
1726 if (Function::isTargetIntrinsic(IID))
1727 return TargetTransformInfo::TCC_Basic;
1728
1729 // VP Intrinsics should have the same cost as their non-vp counterpart.
1730 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1731 // counterpart when the vector length argument is smaller than the maximum
1732 // vector length.
1733 // TODO: Support other kinds of VPIntrinsics
1734 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1735 std::optional<unsigned> FOp =
1736 VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
1737 if (FOp) {
1738 if (ICA.getID() == Intrinsic::vp_load) {
1739 Align Alignment;
1740 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1741 Alignment = VPI->getPointerAlignment().valueOrOne();
1742 unsigned AS = 0;
1743 if (ICA.getArgTypes().size() > 1)
1744 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
1745 AS = PtrTy->getAddressSpace();
1746 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1747 AS, CostKind);
1748 }
1749 if (ICA.getID() == Intrinsic::vp_store) {
1750 Align Alignment;
1751 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1752 Alignment = VPI->getPointerAlignment().valueOrOne();
1753 unsigned AS = 0;
1754 if (ICA.getArgTypes().size() >= 2)
1755 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
1756 AS = PtrTy->getAddressSpace();
1757 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1758 AS, CostKind);
1759 }
1760 if (VPBinOpIntrinsic::isVPBinOp(ICA.getID()) ||
1761 ICA.getID() == Intrinsic::vp_fneg) {
1762 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1763 CostKind);
1764 }
1765 if (VPCastIntrinsic::isVPCast(ICA.getID())) {
1766 return thisT()->getCastInstrCost(
1767 *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
1768 TTI::CastContextHint::None, CostKind);
1769 }
1770 if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
1771 // We can only handle vp_cmp intrinsics with underlying instructions.
1772 if (ICA.getInst()) {
1773 assert(FOp);
1774 auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
1775 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1776 ICA.getReturnType(),
1777 UI->getPredicate(), CostKind);
1778 }
1779 }
1780 }
1781 if (ICA.getID() == Intrinsic::vp_load_ff) {
1782 Type *RetTy = ICA.getReturnType();
1783 Type *DataTy = cast<StructType>(RetTy)->getElementType(0);
1784 Align Alignment;
1785 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1786 Alignment = VPI->getPointerAlignment().valueOrOne();
1787 return thisT()->getMemIntrinsicInstrCost(
1788 MemIntrinsicCostAttributes(ICA.getID(), DataTy, Alignment),
1789 CostKind);
1790 }
1791 if (ICA.getID() == Intrinsic::vp_scatter) {
1792 if (ICA.isTypeBasedOnly()) {
1793 IntrinsicCostAttributes MaskedScatter(
1794 Intrinsic::masked_scatter, ICA.getReturnType(),
1795 ICA.getArgTypes(),
1796 ICA.getFlags());
1797 return getTypeBasedIntrinsicInstrCost(MaskedScatter, CostKind);
1798 }
1799 Align Alignment;
1800 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1801 Alignment = VPI->getPointerAlignment().valueOrOne();
1802 bool VarMask = !isa<Constant>(ICA.getArgs()[2]);
1803 return thisT()->getMemIntrinsicInstrCost(
1804 MemIntrinsicCostAttributes(Intrinsic::vp_scatter,
1805 ICA.getArgTypes()[0], ICA.getArgs()[1],
1806 VarMask, Alignment, nullptr),
1807 CostKind);
1808 }
1809 if (ICA.getID() == Intrinsic::vp_gather) {
1810 if (ICA.isTypeBasedOnly()) {
1811 IntrinsicCostAttributes MaskedGather(
1812 Intrinsic::masked_gather, ICA.getReturnType(),
1813 ICA.getArgTypes(),
1814 ICA.getFlags());
1815 return getTypeBasedIntrinsicInstrCost(MaskedGather, CostKind);
1816 }
1817 Align Alignment;
1818 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1819 Alignment = VPI->getPointerAlignment().valueOrOne();
1820 bool VarMask = !isa<Constant>(ICA.getArgs()[1]);
1821 return thisT()->getMemIntrinsicInstrCost(
1822 MemIntrinsicCostAttributes(Intrinsic::vp_gather,
1823 ICA.getReturnType(), ICA.getArgs()[0],
1824 VarMask, Alignment, nullptr),
1825 CostKind);
1826 }
1827
1828 if (ICA.getID() == Intrinsic::vp_select ||
1829 ICA.getID() == Intrinsic::vp_merge) {
1830 TTI::OperandValueInfo OpInfoX, OpInfoY;
1831 if (!ICA.isTypeBasedOnly()) {
1832 OpInfoX = TTI::getOperandInfo(ICA.getArgs()[0]);
1833 OpInfoY = TTI::getOperandInfo(ICA.getArgs()[1]);
1834 }
1835 return getCmpSelInstrCost(
1836 Instruction::Select, ICA.getReturnType(), ICA.getArgTypes()[0],
1837 CmpInst::BAD_ICMP_PREDICATE, CostKind, OpInfoX, OpInfoY);
1838 }
1839
1840 std::optional<Intrinsic::ID> FID =
1841 VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
1842
1843 // Not functionally equivalent but close enough for cost modelling.
1844 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1845 FID = Intrinsic::vector_reverse;
1846
1847 if (FID) {
1848 // Non-vp version will have same arg types except mask and vector
1849 // length.
1850 assert(ICA.getArgTypes().size() >= 2 &&
1851 "Expected VPIntrinsic to have Mask and Vector Length args and "
1852 "types");
1853
1854 ArrayRef<const Value *> NewArgs = ArrayRef(ICA.getArgs());
1855 if (!ICA.isTypeBasedOnly())
1856 NewArgs = NewArgs.drop_back(2);
1857 ArrayRef<Type *> NewTys = ArrayRef(ICA.getArgTypes()).drop_back(2);
1858
1859 // VPReduction intrinsics have a start value argument that their non-vp
1860 // counterparts do not have, except for the fadd and fmul non-vp
1861 // counterpart.
1862 if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
1863 *FID != Intrinsic::vector_reduce_fadd &&
1864 *FID != Intrinsic::vector_reduce_fmul) {
1865 if (!ICA.isTypeBasedOnly())
1866 NewArgs = NewArgs.drop_front();
1867 NewTys = NewTys.drop_front();
1868 }
1869
1870 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewArgs,
1871 NewTys, ICA.getFlags());
1872 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1873 }
1874 }
1875
1876 if (ICA.isTypeBasedOnly())
1877 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
1878
1879 Type *RetTy = ICA.getReturnType();
1880
1881 ElementCount RetVF = isVectorizedTy(RetTy) ? getVectorizedTypeVF(RetTy)
1882 : ElementCount::getFixed(1);
1883
1884 const IntrinsicInst *I = ICA.getInst();
1885 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1886 FastMathFlags FMF = ICA.getFlags();
1887 switch (IID) {
1888 default:
1889 break;
1890
1891 case Intrinsic::powi:
1892 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1893 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1894 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1895 ShouldOptForSize)) {
1896 // The cost is modeled on the expansion performed by ExpandPowI in
1897 // SelectionDAGBuilder.
1898 APInt Exponent = RHSC->getValue().abs();
1899 unsigned ActiveBits = Exponent.getActiveBits();
1900 unsigned PopCount = Exponent.popcount();
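// For example, powi(X, 11): |11| = 0b1011, so ActiveBits = 4 and
// PopCount = 3, i.e. (4 + 3 - 2) = 5 fmuls: three squarings of X plus two
// multiplies into the running product.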
1901 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1902 thisT()->getArithmeticInstrCost(
1903 Instruction::FMul, RetTy, CostKind);
1904 if (RHSC->isNegative())
1905 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1906 CostKind);
1907 return Cost;
1908 }
1909 }
1910 break;
1911 case Intrinsic::cttz:
1912 // FIXME: If necessary, this should go in target-specific overrides.
1913 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1914 return TargetTransformInfo::TCC_Basic;
1915 break;
1916
1917 case Intrinsic::ctlz:
1918 // FIXME: If necessary, this should go in target-specific overrides.
1919 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1920 return TargetTransformInfo::TCC_Basic;
1921 break;
1922
1923 case Intrinsic::memcpy:
1924 return thisT()->getMemcpyCost(ICA.getInst());
1925
1926 case Intrinsic::masked_scatter: {
1927 const Value *Mask = Args[2];
1928 bool VarMask = !isa<Constant>(Mask);
1929 Align Alignment = I->getParamAlign(1).valueOrOne();
1930 return thisT()->getMemIntrinsicInstrCost(
1931 MemIntrinsicCostAttributes(Intrinsic::masked_scatter,
1932 ICA.getArgTypes()[0], Args[1], VarMask,
1933 Alignment, I),
1934 CostKind);
1935 }
1936 case Intrinsic::masked_gather: {
1937 const Value *Mask = Args[1];
1938 bool VarMask = !isa<Constant>(Mask);
1939 Align Alignment = I->getParamAlign(0).valueOrOne();
1940 return thisT()->getMemIntrinsicInstrCost(
1941 MemIntrinsicCostAttributes(Intrinsic::masked_gather, RetTy, Args[0],
1942 VarMask, Alignment, I),
1943 CostKind);
1944 }
1945 case Intrinsic::masked_compressstore: {
1946 const Value *Data = Args[0];
1947 const Value *Mask = Args[2];
1948 Align Alignment = I->getParamAlign(1).valueOrOne();
1949 return thisT()->getMemIntrinsicInstrCost(
1950 MemIntrinsicCostAttributes(IID, Data->getType(), !isa<Constant>(Mask),
1951 Alignment, I),
1952 CostKind);
1953 }
1954 case Intrinsic::masked_expandload: {
1955 const Value *Mask = Args[1];
1956 Align Alignment = I->getParamAlign(0).valueOrOne();
1957 return thisT()->getMemIntrinsicInstrCost(
1958 MemIntrinsicCostAttributes(IID, RetTy, !isa<Constant>(Mask),
1959 Alignment, I),
1960 CostKind);
1961 }
1962 case Intrinsic::experimental_vp_strided_store: {
1963 const Value *Data = Args[0];
1964 const Value *Ptr = Args[1];
1965 const Value *Mask = Args[3];
1966 const Value *EVL = Args[4];
1967 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1968 Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
1969 Align Alignment =
1970 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1971 return thisT()->getMemIntrinsicInstrCost(
1972 MemIntrinsicCostAttributes(IID, Data->getType(), Ptr, VarMask,
1973 Alignment, I),
1974 CostKind);
1975 }
1976 case Intrinsic::experimental_vp_strided_load: {
1977 const Value *Ptr = Args[0];
1978 const Value *Mask = Args[2];
1979 const Value *EVL = Args[3];
1980 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1981 Type *EltTy = cast<VectorType>(RetTy)->getElementType();
1982 Align Alignment =
1983 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
1984 return thisT()->getMemIntrinsicInstrCost(
1985 MemIntrinsicCostAttributes(IID, RetTy, Ptr, VarMask, Alignment, I),
1986 CostKind);
1987 }
1988 case Intrinsic::stepvector: {
1989 if (isa<ScalableVectorType>(RetTy))
1990 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1991 // The cost of materialising a constant integer vector.
1992 return TargetTransformInfo::TCC_Basic;
1993 }
1994 case Intrinsic::vector_extract: {
1995 // FIXME: Handle case where a scalable vector is extracted from a scalable
1996 // vector
1997 if (isa<ScalableVectorType>(RetTy))
1998 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1999 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
2000 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
2001 cast<VectorType>(RetTy),
2002 cast<VectorType>(Args[0]->getType()), {},
2003 CostKind, Index, cast<VectorType>(RetTy));
2004 }
2005 case Intrinsic::vector_insert: {
2006 // FIXME: Handle case where a scalable vector is inserted into a scalable
2007 // vector
2008 if (isa<ScalableVectorType>(Args[1]->getType()))
2009 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2010 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2011 return thisT()->getShuffleCost(
2012 TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
2013 cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
2014 cast<VectorType>(Args[1]->getType()));
2015 }
2016 case Intrinsic::vector_splice_left:
2017 case Intrinsic::vector_splice_right: {
2018 auto *COffset = dyn_cast<ConstantInt>(Args[2]);
2019 if (!COffset)
2020 break;
2021 unsigned Index = COffset->getZExtValue();
2022 return thisT()->getShuffleCost(
2023 TTI::SK_Splice, cast<VectorType>(RetTy),
2024 cast<VectorType>(Args[0]->getType()), {}, CostKind,
2025 IID == Intrinsic::vector_splice_left ? Index : -Index,
2026 cast<VectorType>(RetTy));
2027 }
2028 case Intrinsic::vector_reduce_add:
2029 case Intrinsic::vector_reduce_mul:
2030 case Intrinsic::vector_reduce_and:
2031 case Intrinsic::vector_reduce_or:
2032 case Intrinsic::vector_reduce_xor:
2033 case Intrinsic::vector_reduce_smax:
2034 case Intrinsic::vector_reduce_smin:
2035 case Intrinsic::vector_reduce_fmax:
2036 case Intrinsic::vector_reduce_fmin:
2037 case Intrinsic::vector_reduce_fmaximum:
2038 case Intrinsic::vector_reduce_fminimum:
2039 case Intrinsic::vector_reduce_umax:
2040 case Intrinsic::vector_reduce_umin: {
2041 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
2042 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2043 }
2044 case Intrinsic::vector_reduce_fadd:
2045 case Intrinsic::vector_reduce_fmul: {
2046 IntrinsicCostAttributes Attrs(
2047 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2048 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2049 }
2050 case Intrinsic::fshl:
2051 case Intrinsic::fshr: {
2052 const Value *X = Args[0];
2053 const Value *Y = Args[1];
2054 const Value *Z = Args[2];
2055 const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
2056 const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
2057 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
2058
2059 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2060 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2061 InstructionCost Cost = 0;
2062 Cost +=
2063 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2064 Cost +=
2065 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2066 Cost += thisT()->getArithmeticInstrCost(
2067 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2068 {OpInfoZ.Kind, TTI::OP_None});
2069 Cost += thisT()->getArithmeticInstrCost(
2070 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2071 {OpInfoZ.Kind, TTI::OP_None});
2072 // Non-constant shift amounts require a modulo. If the type size is a
2073 // power of 2 then this will be converted to an and, otherwise it will
2074 // use a urem.
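// E.g. for a 32-bit funnel shift with a variable shift amount the modulo
// becomes "Z & 31"; only non-power-of-2 bit widths pay for a urem here.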
2075 if (!OpInfoZ.isConstant())
2076 Cost += thisT()->getArithmeticInstrCost(
2077 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2078 : BinaryOperator::URem,
2079 RetTy, CostKind, OpInfoZ,
2080 {TTI::OK_UniformConstantValue, TTI::OP_None});
2081 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
2082 if (X != Y) {
2083 Type *CondTy = RetTy->getWithNewBitWidth(1);
2084 Cost +=
2085 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2086 CmpInst::ICMP_EQ, CostKind);
2087 Cost +=
2088 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2089 CmpInst::ICMP_EQ, CostKind);
2090 }
2091 return Cost;
2092 }
2093 case Intrinsic::experimental_cttz_elts: {
2094 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
2095
2096 // If we're not expanding the intrinsic then we assume this is cheap
2097 // to implement.
2098 if (!getTLI()->shouldExpandCttzElements(ArgType))
2099 return getTypeLegalizationCost(RetTy).first;
2100
2101 // TODO: The costs below reflect the expansion code in
2102 // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
2103 // favour of compile time.
2104
2105 // Find the smallest "sensible" element type to use for the expansion.
2106 bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
2107 ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
2108 if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
2109 VScaleRange = getVScaleRange(I->getCaller(), 64);
2110
2111 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2112 RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
2113 Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);
2114
2115 // Create the new vector type & get the vector length
2116 Type *NewVecTy = VectorType::get(
2117 NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
2118
2119 IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
2120 FMF);
2121 InstructionCost Cost =
2122 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2123
2124 Cost +=
2125 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2126 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2127 Args[0]->getType(),
2128 TTI::CastContextHint::None, CostKind);
2129 Cost +=
2130 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2131
2132 IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
2133 NewEltTy, NewVecTy, FMF, I, 1);
2134 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2135 Cost +=
2136 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2137
2138 return Cost;
2139 }
2140 case Intrinsic::get_active_lane_mask:
2141 case Intrinsic::experimental_vector_match:
2142 case Intrinsic::experimental_vector_histogram_add:
2143 case Intrinsic::experimental_vector_histogram_uadd_sat:
2144 case Intrinsic::experimental_vector_histogram_umax:
2145 case Intrinsic::experimental_vector_histogram_umin:
2146 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2147 case Intrinsic::modf:
2148 case Intrinsic::sincos:
2149 case Intrinsic::sincospi: {
2150 std::optional<unsigned> CallRetElementIndex;
2151 // The first element of the modf result is returned by value in the
2152 // libcall.
2153 if (ICA.getID() == Intrinsic::modf)
2154 CallRetElementIndex = 0;
2155
2156 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2157 ICA, CostKind, CallRetElementIndex))
2158 return *Cost;
2159 // Otherwise, fallback to default scalarization cost.
2160 break;
2161 }
2162 case Intrinsic::loop_dependence_war_mask:
2163 case Intrinsic::loop_dependence_raw_mask: {
2164 // Compute the cost of the expanded version of these intrinsics:
2165 //
2166 // The possible expansions are...
2167 //
2168 // loop_dependence_war_mask:
2169 // diff = (ptrB - ptrA) / eltSize
2170 // cmp = icmp sle diff, 0
2171 // upper_bound = select cmp, -1, diff
2172 // mask = get_active_lane_mask 0, upper_bound
2173 //
2174 // loop_dependence_raw_mask:
2175 // diff = (abs(ptrB - ptrA)) / eltSize
2176 // cmp = icmp eq diff, 0
2177 // upper_bound = select cmp, -1, diff
2178 // mask = get_active_lane_mask 0, upper_bound
2179 //
2180 auto *PtrTy = cast<PointerType>(ICA.getArgTypes()[0]);
2181 Type *IntPtrTy = IntegerType::getIntNTy(
2182 RetTy->getContext(), thisT()->getDataLayout().getPointerSizeInBits(
2183 PtrTy->getAddressSpace()));
2184 bool IsReadAfterWrite = IID == Intrinsic::loop_dependence_raw_mask;
2185
2186 InstructionCost Cost =
2187 thisT()->getArithmeticInstrCost(Instruction::Sub, IntPtrTy, CostKind);
2188 if (IsReadAfterWrite) {
2189 IntrinsicCostAttributes AbsAttrs(Intrinsic::abs, IntPtrTy, {IntPtrTy},
2190 {});
2191 Cost += thisT()->getIntrinsicInstrCost(AbsAttrs, CostKind);
2192 }
2193
2194 TTI::OperandValueInfo EltSizeOpInfo =
2195 TTI::getOperandInfo(ICA.getArgs()[2]);
2196 Cost += thisT()->getArithmeticInstrCost(Instruction::SDiv, IntPtrTy,
2197 CostKind, {}, EltSizeOpInfo);
2198
2199 Type *CondTy = IntegerType::getInt1Ty(RetTy->getContext());
2200 CmpInst::Predicate Pred =
2201 IsReadAfterWrite ? CmpInst::ICMP_EQ : CmpInst::ICMP_SLE;
2202 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CondTy,
2203 IntPtrTy, Pred, CostKind);
2204 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, IntPtrTy,
2205 CondTy, Pred, CostKind);
2206
2207 IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
2208 {IntPtrTy, IntPtrTy}, FMF);
2209 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2210 return Cost;
2211 }
2212 }
2213
2214 // Assume that we need to scalarize this intrinsic.
2215 // Compute the scalarization overhead based on Args for a vector
2216 // intrinsic.
2217 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2218 if (RetVF.isVector() && !RetVF.isScalable()) {
2219 ScalarizationCost = 0;
2220 if (!RetTy->isVoidTy()) {
2221 for (Type *VectorTy : getContainedTypes(RetTy)) {
2222 ScalarizationCost += getScalarizationOverhead(
2223 cast<VectorType>(VectorTy),
2224 /*Insert=*/true, /*Extract=*/false, CostKind);
2225 }
2226 }
2227 ScalarizationCost += getOperandsScalarizationOverhead(
2228 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2229 CostKind);
2230 }
2231
2232 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
2233 ScalarizationCost);
2234 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2235 }
2236
2237 /// Get intrinsic cost based on argument types.
2238 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
2239 /// cost of scalarizing the arguments and the return value will be computed
2240 /// based on types.
2241 InstructionCost
2242 getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2243 TTI::TargetCostKind CostKind) const {
2244 Intrinsic::ID IID = ICA.getID();
2245 Type *RetTy = ICA.getReturnType();
2246 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
2247 FastMathFlags FMF = ICA.getFlags();
2248 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
2249 bool SkipScalarizationCost = ICA.skipScalarizationCost();
2250
2251 VectorType *VecOpTy = nullptr;
2252 if (!Tys.empty()) {
2253 // The vector reduction operand is operand 0 except for fadd/fmul.
2254 // Their operand 0 is a scalar start value, so the vector op is operand 1.
2255 unsigned VecTyIndex = 0;
2256 if (IID == Intrinsic::vector_reduce_fadd ||
2257 IID == Intrinsic::vector_reduce_fmul)
2258 VecTyIndex = 1;
2259 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2260 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
2261 }
2262
2263 // Library call cost - other than size, make it expensive.
2264 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
2265 unsigned ISD = 0;
2266 switch (IID) {
2267 default: {
2268 // Scalable vectors cannot be scalarized, so return Invalid.
2269 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2270 return isa<ScalableVectorType>(Ty);
2271 }))
2272 return InstructionCost::getInvalid();
2273
2274 // Assume that we need to scalarize this intrinsic.
2275 InstructionCost ScalarizationCost =
2276 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2277 unsigned ScalarCalls = 1;
2278 Type *ScalarRetTy = RetTy;
2279 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2280 if (!SkipScalarizationCost)
2281 ScalarizationCost = getScalarizationOverhead(
2282 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
2283 ScalarCalls = std::max(ScalarCalls,
2284 cast<FixedVectorType>(RetVTy)->getNumElements());
2285 ScalarRetTy = RetTy->getScalarType();
2286 }
2287 SmallVector<Type *, 4> ScalarTys;
2288 for (Type *Ty : Tys) {
2289 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2290 if (!SkipScalarizationCost)
2291 ScalarizationCost += getScalarizationOverhead(
2292 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2293 ScalarCalls = std::max(ScalarCalls,
2294 cast<FixedVectorType>(VTy)->getNumElements());
2295 Ty = Ty->getScalarType();
2296 }
2297 ScalarTys.push_back(Ty);
2298 }
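// E.g. a <4 x float> intrinsic with one vector operand scalarizes to four
// scalar calls plus the overhead of four result inserts and four operand
// extracts, all accounted in ScalarizationCost.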
2299 if (ScalarCalls == 1)
2300 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
2301
2302 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
2303 InstructionCost ScalarCost =
2304 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2305
2306 return ScalarCalls * ScalarCost + ScalarizationCost;
2307 }
2308 // Look for intrinsics that can be lowered directly or turned into a scalar
2309 // intrinsic call.
2310 case Intrinsic::sqrt:
2311 ISD = ISD::FSQRT;
2312 break;
2313 case Intrinsic::sin:
2314 ISD = ISD::FSIN;
2315 break;
2316 case Intrinsic::cos:
2317 ISD = ISD::FCOS;
2318 break;
2319 case Intrinsic::sincos:
2320 ISD = ISD::FSINCOS;
2321 break;
2322 case Intrinsic::sincospi:
2323 ISD = ISD::FSINCOSPI;
2324 break;
2325 case Intrinsic::modf:
2326 ISD = ISD::FMODF;
2327 break;
2328 case Intrinsic::tan:
2329 ISD = ISD::FTAN;
2330 break;
2331 case Intrinsic::asin:
2332 ISD = ISD::FASIN;
2333 break;
2334 case Intrinsic::acos:
2335 ISD = ISD::FACOS;
2336 break;
2337 case Intrinsic::atan:
2338 ISD = ISD::FATAN;
2339 break;
2340 case Intrinsic::atan2:
2341 ISD = ISD::FATAN2;
2342 break;
2343 case Intrinsic::sinh:
2344 ISD = ISD::FSINH;
2345 break;
2346 case Intrinsic::cosh:
2347 ISD = ISD::FCOSH;
2348 break;
2349 case Intrinsic::tanh:
2350 ISD = ISD::FTANH;
2351 break;
2352 case Intrinsic::exp:
2353 ISD = ISD::FEXP;
2354 break;
2355 case Intrinsic::exp2:
2356 ISD = ISD::FEXP2;
2357 break;
2358 case Intrinsic::exp10:
2359 ISD = ISD::FEXP10;
2360 break;
2361 case Intrinsic::log:
2362 ISD = ISD::FLOG;
2363 break;
2364 case Intrinsic::log10:
2365 ISD = ISD::FLOG10;
2366 break;
2367 case Intrinsic::log2:
2368 ISD = ISD::FLOG2;
2369 break;
2370 case Intrinsic::ldexp:
2371 ISD = ISD::FLDEXP;
2372 break;
2373 case Intrinsic::fabs:
2374 ISD = ISD::FABS;
2375 break;
2376 case Intrinsic::canonicalize:
2377 ISD = ISD::FCANONICALIZE;
2378 break;
2379 case Intrinsic::minnum:
2380 ISD = ISD::FMINNUM;
2381 break;
2382 case Intrinsic::maxnum:
2383 ISD = ISD::FMAXNUM;
2384 break;
2385 case Intrinsic::minimum:
2386 ISD = ISD::FMINIMUM;
2387 break;
2388 case Intrinsic::maximum:
2389 ISD = ISD::FMAXIMUM;
2390 break;
2391 case Intrinsic::minimumnum:
2392 ISD = ISD::FMINIMUMNUM;
2393 break;
2394 case Intrinsic::maximumnum:
2395 ISD = ISD::FMAXIMUMNUM;
2396 break;
2397 case Intrinsic::copysign:
2398 ISD = ISD::FCOPYSIGN;
2399 break;
2400 case Intrinsic::floor:
2401 ISD = ISD::FFLOOR;
2402 break;
2403 case Intrinsic::ceil:
2404 ISD = ISD::FCEIL;
2405 break;
2406 case Intrinsic::trunc:
2407 ISD = ISD::FTRUNC;
2408 break;
2409 case Intrinsic::nearbyint:
2410 ISD = ISD::FNEARBYINT;
2411 break;
2412 case Intrinsic::rint:
2413 ISD = ISD::FRINT;
2414 break;
2415 case Intrinsic::lrint:
2416 ISD = ISD::LRINT;
2417 break;
2418 case Intrinsic::llrint:
2419 ISD = ISD::LLRINT;
2420 break;
2421 case Intrinsic::round:
2422 ISD = ISD::FROUND;
2423 break;
2424 case Intrinsic::roundeven:
2425 ISD = ISD::FROUNDEVEN;
2426 break;
2427 case Intrinsic::lround:
2428 ISD = ISD::LROUND;
2429 break;
2430 case Intrinsic::llround:
2431 ISD = ISD::LLROUND;
2432 break;
2433 case Intrinsic::pow:
2434 ISD = ISD::FPOW;
2435 break;
2436 case Intrinsic::fma:
2437 ISD = ISD::FMA;
2438 break;
2439 case Intrinsic::fmuladd:
2440 ISD = ISD::FMA;
2441 break;
2442 case Intrinsic::experimental_constrained_fmuladd:
2443 ISD = ISD::STRICT_FMA;
2444 break;
2445 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2446 case Intrinsic::lifetime_start:
2447 case Intrinsic::lifetime_end:
2448 case Intrinsic::sideeffect:
2449 case Intrinsic::pseudoprobe:
2450 case Intrinsic::arithmetic_fence:
2451 return 0;
2452 case Intrinsic::masked_store: {
2453 Type *Ty = Tys[0];
2454 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2455 return thisT()->getMemIntrinsicInstrCost(
2456 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2457 }
2458 case Intrinsic::masked_load: {
2459 Type *Ty = RetTy;
2460 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2461 return thisT()->getMemIntrinsicInstrCost(
2462 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2463 }
2464 case Intrinsic::experimental_vp_strided_store: {
2465 auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
2466 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2467 return thisT()->getMemIntrinsicInstrCost(
2468 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2469 /*VariableMask=*/true, Alignment,
2470 ICA.getInst()),
2471 CostKind);
2472 }
2473 case Intrinsic::experimental_vp_strided_load: {
2474 auto *Ty = cast<VectorType>(ICA.getReturnType());
2475 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2476 return thisT()->getMemIntrinsicInstrCost(
2477 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2478 /*VariableMask=*/true, Alignment,
2479 ICA.getInst()),
2480 CostKind);
2481 }
2482 case Intrinsic::vector_reduce_add:
2483 case Intrinsic::vector_reduce_mul:
2484 case Intrinsic::vector_reduce_and:
2485 case Intrinsic::vector_reduce_or:
2486 case Intrinsic::vector_reduce_xor:
2487 return thisT()->getArithmeticReductionCost(
2488 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2489 CostKind);
2490 case Intrinsic::vector_reduce_fadd:
2491 case Intrinsic::vector_reduce_fmul:
2492 return thisT()->getArithmeticReductionCost(
2493 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2494 case Intrinsic::vector_reduce_smax:
2495 case Intrinsic::vector_reduce_smin:
2496 case Intrinsic::vector_reduce_umax:
2497 case Intrinsic::vector_reduce_umin:
2498 case Intrinsic::vector_reduce_fmax:
2499 case Intrinsic::vector_reduce_fmin:
2500 case Intrinsic::vector_reduce_fmaximum:
2501 case Intrinsic::vector_reduce_fminimum:
2502 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2503 VecOpTy, ICA.getFlags(), CostKind);
2504 case Intrinsic::experimental_vector_match: {
2505 auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
2506 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
2507 unsigned SearchSize = NeedleTy->getNumElements();
2508
2509 // If we're not expanding the intrinsic then we assume this is cheap to
2510 // implement.
2511 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2512 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2513 return getTypeLegalizationCost(RetTy).first;
2514
2515 // Approximate the cost based on the expansion code in
2516 // SelectionDAGBuilder.
2517 InstructionCost Cost = 0;
2518 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2519 CostKind, 1, nullptr, nullptr);
2520 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2521 CostKind, 0, nullptr, nullptr);
2522 Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, SearchTy, {},
2523 CostKind, 0, nullptr);
2524 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2525 CmpInst::ICMP_EQ, CostKind);
2526 Cost +=
2527 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2528 Cost *= SearchSize;
2529 Cost +=
2530 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2531 return Cost;
2532 }
2533 case Intrinsic::vector_reverse:
2534 return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
2535 cast<VectorType>(ICA.getArgTypes()[0]), {},
2536 CostKind, 0, cast<VectorType>(RetTy));
2537 case Intrinsic::experimental_vector_histogram_add:
2538 case Intrinsic::experimental_vector_histogram_uadd_sat:
2539 case Intrinsic::experimental_vector_histogram_umax:
2540 case Intrinsic::experimental_vector_histogram_umin: {
2541 auto *PtrsTy = dyn_cast<FixedVectorType>(ICA.getArgTypes()[0]);
2542 Type *EltTy = ICA.getArgTypes()[1];
2543
2544 // Targets with scalable vectors must handle this on their own.
2545 if (!PtrsTy)
2546 return InstructionCost::getInvalid();
2547
2548 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2549 InstructionCost Cost = 0;
2550 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2551 CostKind, 1, nullptr, nullptr);
2552 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2553 CostKind);
2554 switch (IID) {
2555 default:
2556 llvm_unreachable("Unhandled histogram update operation.");
2557 case Intrinsic::experimental_vector_histogram_add:
2558 Cost +=
2559 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2560 break;
2561 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2562 IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
2563 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2564 break;
2565 }
2566 case Intrinsic::experimental_vector_histogram_umax: {
2567 IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
2568 Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
2569 break;
2570 }
2571 case Intrinsic::experimental_vector_histogram_umin: {
2572 IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
2573 Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
2574 break;
2575 }
2576 }
2577 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2578 CostKind);
2579 Cost *= PtrsTy->getNumElements();
2580 return Cost;
2581 }
2582 case Intrinsic::get_active_lane_mask: {
2583 Type *ArgTy = ICA.getArgTypes()[0];
2584 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2585 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2586
2587 // If we're not expanding the intrinsic then we assume this is cheap
2588 // to implement.
2589 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2590 return getTypeLegalizationCost(RetTy).first;
2591
2592 // Create the expanded types that will be used to calculate the uadd_sat
2593 // operation.
2594 Type *ExpRetTy =
2595 VectorType::get(ArgTy, cast<VectorType>(RetTy)->getElementCount());
2596 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
2597 InstructionCost Cost =
2598 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2599 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2600 CmpInst::ICMP_ULT, CostKind);
2601 return Cost;
2602 }
2603 case Intrinsic::experimental_memset_pattern:
2604 // This cost is set to match the cost of the memset_pattern16 libcall.
2605 // It should likely be re-evaluated after migration to this intrinsic
2606 // is complete.
2607 return TTI::TCC_Basic * 4;
2608 case Intrinsic::abs:
2609 ISD = ISD::ABS;
2610 break;
2611 case Intrinsic::fshl:
2612 ISD = ISD::FSHL;
2613 break;
2614 case Intrinsic::fshr:
2615 ISD = ISD::FSHR;
2616 break;
2617 case Intrinsic::smax:
2618 ISD = ISD::SMAX;
2619 break;
2620 case Intrinsic::smin:
2621 ISD = ISD::SMIN;
2622 break;
2623 case Intrinsic::umax:
2624 ISD = ISD::UMAX;
2625 break;
2626 case Intrinsic::umin:
2627 ISD = ISD::UMIN;
2628 break;
2629 case Intrinsic::sadd_sat:
2630 ISD = ISD::SADDSAT;
2631 break;
2632 case Intrinsic::ssub_sat:
2633 ISD = ISD::SSUBSAT;
2634 break;
2635 case Intrinsic::uadd_sat:
2636 ISD = ISD::UADDSAT;
2637 break;
2638 case Intrinsic::usub_sat:
2639 ISD = ISD::USUBSAT;
2640 break;
2641 case Intrinsic::smul_fix:
2642 ISD = ISD::SMULFIX;
2643 break;
2644 case Intrinsic::umul_fix:
2645 ISD = ISD::UMULFIX;
2646 break;
2647 case Intrinsic::sadd_with_overflow:
2648 ISD = ISD::SADDO;
2649 break;
2650 case Intrinsic::ssub_with_overflow:
2651 ISD = ISD::SSUBO;
2652 break;
2653 case Intrinsic::uadd_with_overflow:
2654 ISD = ISD::UADDO;
2655 break;
2656 case Intrinsic::usub_with_overflow:
2657 ISD = ISD::USUBO;
2658 break;
2659 case Intrinsic::smul_with_overflow:
2660 ISD = ISD::SMULO;
2661 break;
2662 case Intrinsic::umul_with_overflow:
2663 ISD = ISD::UMULO;
2664 break;
2665 case Intrinsic::fptosi_sat:
2666 case Intrinsic::fptoui_sat: {
2667 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
2668 std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
2669
2670 // For cast instructions, types are different between source and
2671 // destination. Also need to check if the source type can be legalized.
2672 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2673 return InstructionCost::getInvalid();
2674 ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
2675 : ISD::FP_TO_UINT_SAT;
2676 break;
2677 }
2678 case Intrinsic::ctpop:
2679 ISD = ISD::CTPOP;
2680 // In case of legalization use TCC_Expensive. This is cheaper than a
2681 // library call but still not a cheap instruction.
2682 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2683 break;
2684 case Intrinsic::ctlz:
2685 ISD = ISD::CTLZ;
2686 break;
2687 case Intrinsic::cttz:
2688 ISD = ISD::CTTZ;
2689 break;
2690 case Intrinsic::bswap:
2691 ISD = ISD::BSWAP;
2692 break;
2693 case Intrinsic::bitreverse:
2694 ISD = ISD::BITREVERSE;
2695 break;
2696 case Intrinsic::ucmp:
2697 ISD = ISD::UCMP;
2698 break;
2699 case Intrinsic::scmp:
2700 ISD = ISD::SCMP;
2701 break;
2702 case Intrinsic::clmul:
2703 ISD = ISD::CLMUL;
2704 break;
2705 }
2706
2707 auto *ST = dyn_cast<StructType>(RetTy);
2708 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2709 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);
2710
2711 const TargetLoweringBase *TLI = getTLI();
2712
2713 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2714 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2715 TLI->isFAbsFree(LT.second)) {
2716 return 0;
2717 }
2718
2719 // The operation is legal. Assume it costs 1.
2720 // If the type is split to multiple registers, assume that there is some
2721 // overhead to this.
2722 // TODO: Once we have extract/insert subvector cost we need to use them.
2723 if (LT.first > 1)
2724 return (LT.first * 2);
2725 else
2726 return (LT.first * 1);
2727 } else if (TLI->isOperationCustom(ISD, LT.second)) {
2728 // If the operation is custom lowered then assume
2729 // that the code is twice as expensive.
2730 return (LT.first * 2);
2731 }
2732
2733 switch (IID) {
2734 case Intrinsic::fmuladd: {
2735 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2736 // point mul followed by an add.
2737
2738 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2739 CostKind) +
2740 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2741 CostKind);
2742 }
2743 case Intrinsic::experimental_constrained_fmuladd: {
2744 IntrinsicCostAttributes FMulAttrs(
2745 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2746 IntrinsicCostAttributes FAddAttrs(
2747 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2748 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2749 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2750 }
2751 case Intrinsic::smin:
2752 case Intrinsic::smax:
2753 case Intrinsic::umin:
2754 case Intrinsic::umax: {
2755 // minmax(X,Y) = select(icmp(X,Y),X,Y)
2756 Type *CondTy = RetTy->getWithNewBitWidth(1);
2757 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2758 CmpInst::Predicate Pred =
2759 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
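// E.g. smax(X, Y) is costed as one icmp sgt X, Y plus one select.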
2760 InstructionCost Cost = 0;
2761 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2762 Pred, CostKind);
2763 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2764 Pred, CostKind);
2765 return Cost;
2766 }
2767 case Intrinsic::sadd_with_overflow:
2768 case Intrinsic::ssub_with_overflow: {
2769 Type *SumTy = RetTy->getContainedType(0);
2770 Type *OverflowTy = RetTy->getContainedType(1);
2771 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2772 ? BinaryOperator::Add
2773 : BinaryOperator::Sub;
2774
2775 // Add:
2776 // Overflow -> (Result < LHS) ^ (RHS < 0)
2777 // Sub:
2778 // Overflow -> (Result < LHS) ^ (RHS > 0)
2779 InstructionCost Cost = 0;
2780 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2781 Cost +=
2782 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2783 CmpInst::ICMP_SGT, CostKind);
2784 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2785 CostKind);
2786 return Cost;
2787 }
2788 case Intrinsic::uadd_with_overflow:
2789 case Intrinsic::usub_with_overflow: {
2790 Type *SumTy = RetTy->getContainedType(0);
2791 Type *OverflowTy = RetTy->getContainedType(1);
2792 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2793 ? BinaryOperator::Add
2794 : BinaryOperator::Sub;
2795 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2796 ? CmpInst::ICMP_ULT
2797 : CmpInst::ICMP_UGT;
2798
2799 InstructionCost Cost = 0;
2800 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2801 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2802 OverflowTy, Pred, CostKind);
2803 return Cost;
2804 }
2805 case Intrinsic::smul_with_overflow:
2806 case Intrinsic::umul_with_overflow: {
2807 Type *MulTy = RetTy->getContainedType(0);
2808 Type *OverflowTy = RetTy->getContainedType(1);
2809 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2810 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2811 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2812
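// E.g. for i32: extend both operands to i64, multiply, truncate the low
// half back, and (roughly) compare the high half against the sign bits of
// the low half (signed) or against zero (unsigned) to detect overflow.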
2813 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2814 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2815
2816 InstructionCost Cost = 0;
2817 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2818 Cost +=
2819 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2820 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2821 CCH, CostKind);
2822 Cost += thisT()->getArithmeticInstrCost(
2823 Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2824 {TTI::OK_UniformConstantValue, TTI::OP_None});
2825
2826 if (IsSigned)
2827 Cost += thisT()->getArithmeticInstrCost(
2828 Instruction::AShr, MulTy, CostKind,
2829 {TTI::OK_AnyValue, TTI::OP_None},
2830 {TTI::OK_UniformConstantValue, TTI::OP_None});
2831
2832 Cost += thisT()->getCmpSelInstrCost(
2833 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2834 return Cost;
2835 }
2836 case Intrinsic::sadd_sat:
2837 case Intrinsic::ssub_sat: {
2838 // Assume a default expansion.
2839 Type *CondTy = RetTy->getWithNewBitWidth(1);
2840
2841 Type *OpTy = StructType::create({RetTy, CondTy});
2842 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2843 ? Intrinsic::sadd_with_overflow
2844 : Intrinsic::ssub_with_overflow;
2845 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2846
2847 // SatMax -> Overflow && SumDiff < 0
2848 // SatMin -> Overflow && SumDiff >= 0
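// I.e. the saturating form is costed as the *_with_overflow intrinsic plus
// one icmp and two selects that pick between the sum and SatMax/SatMin.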
2849 InstructionCost Cost = 0;
2850 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2851 nullptr, ScalarizationCostPassed);
2852 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2853 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2854 Pred, CostKind);
2855 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2856 CondTy, Pred, CostKind);
2857 return Cost;
2858 }
2859 case Intrinsic::uadd_sat:
2860 case Intrinsic::usub_sat: {
2861 Type *CondTy = RetTy->getWithNewBitWidth(1);
2862
2863 Type *OpTy = StructType::create({RetTy, CondTy});
2864 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2865 ? Intrinsic::uadd_with_overflow
2866 : Intrinsic::usub_with_overflow;
2867
2868 InstructionCost Cost = 0;
2869 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2870 nullptr, ScalarizationCostPassed);
2871 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2872 Cost +=
2873 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2874 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2875 return Cost;
2876 }
2877 case Intrinsic::smul_fix:
2878 case Intrinsic::umul_fix: {
2879 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2880 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2881
2882 unsigned ExtOp =
2883 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2884 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2885
2886 InstructionCost Cost = 0;
2887 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2888 Cost +=
2889 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2890 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2891 CCH, CostKind);
2892 Cost += thisT()->getArithmeticInstrCost(
2893 Instruction::LShr, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2894 {TTI::OK_UniformConstantValue, TTI::OP_None});
2895 Cost += thisT()->getArithmeticInstrCost(
2896 Instruction::Shl, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2897 {TTI::OK_UniformConstantValue, TTI::OP_None});
2898 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2899 return Cost;
2900 }
2901 case Intrinsic::abs: {
2902 // abs(X) = select(icmp(X,0),X,sub(0,X))
2903 Type *CondTy = RetTy->getWithNewBitWidth(1);
2904 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2905 InstructionCost Cost = 0;
2906 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2907 Pred, CostKind);
2908 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2909 Pred, CostKind);
2910 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2911 Cost += thisT()->getArithmeticInstrCost(
2912 BinaryOperator::Sub, RetTy, CostKind,
2913 {TTI::OK_UniformConstantValue, TTI::OP_None});
2914 return Cost;
2915 }
2916 case Intrinsic::fshl:
2917 case Intrinsic::fshr: {
2918 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2919 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2920 Type *CondTy = RetTy->getWithNewBitWidth(1);
2921 InstructionCost Cost = 0;
2922 Cost +=
2923 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2924 Cost +=
2925 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2926 Cost +=
2927 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2928 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2929 CostKind);
2930 // Non-constant shift amounts require a modulo. If the type size is a
2931 // power of 2 then this will be converted to an and, otherwise it will
2932 // use a urem.
2933 Cost += thisT()->getArithmeticInstrCost(
2934 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2935 : BinaryOperator::URem,
2936 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2937 {TTI::OK_UniformConstantValue, TTI::OP_None});
2938 // Shift-by-zero handling.
2939 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2940 CmpInst::ICMP_EQ, CostKind);
2941 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2942 CmpInst::ICMP_EQ, CostKind);
2943 return Cost;
2944 }
2945 case Intrinsic::fptosi_sat:
2946 case Intrinsic::fptoui_sat: {
2947 if (Tys.empty())
2948 break;
2949 Type *FromTy = Tys[0];
2950 bool IsSigned = IID == Intrinsic::fptosi_sat;
2951
2952 InstructionCost Cost = 0;
2953 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2954 {FromTy, FromTy});
2955 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2956 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2957 {FromTy, FromTy});
2958 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2959 Cost += thisT()->getCastInstrCost(
2960 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2961 TTI::CastContextHint::None, CostKind);
2962 if (IsSigned) {
2963 Type *CondTy = RetTy->getWithNewBitWidth(1);
2964 Cost += thisT()->getCmpSelInstrCost(
2965 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2966 Cost += thisT()->getCmpSelInstrCost(
2967 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2968 }
2969 return Cost;
2970 }
2971 case Intrinsic::ucmp:
2972 case Intrinsic::scmp: {
2973 Type *CmpTy = Tys[0];
2974 Type *CondTy = RetTy->getWithNewBitWidth(1);
2975 InstructionCost Cost =
2976 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2977 IID == Intrinsic::ucmp ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT,
2978 CostKind) +
2979 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2980 IID == Intrinsic::ucmp ? CmpInst::ICMP_ULT : CmpInst::ICMP_SLT,
2981 CostKind);
2982
2983 EVT VT = TLI->getValueType(DL, CmpTy, true);
2984 if (TLI->shouldExpandCmpUsingSelects(VT)) {
2985 // x < y ? -1 : (x > y ? 1 : 0)
2986 Cost += 2 * thisT()->getCmpSelInstrCost(
2987 BinaryOperator::Select, RetTy, CondTy,
2988 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2989 } else {
2990 // zext(x > y) - zext(x < y)
2991 Cost +=
2992 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2993 TTI::CastContextHint::None, CostKind);
2994 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2995 CostKind);
2996 }
2997 return Cost;
2998 }
2999 case Intrinsic::maximumnum:
3000 case Intrinsic::minimumnum: {
3001 // On platform that support FMAXNUM_IEEE/FMINNUM_IEEE, we expand
3002 // maximumnum/minimumnum to
3003 // ARG0 = fcanonicalize ARG0, ARG0 // to quiet ARG0
3004 // ARG1 = fcanonicalize ARG1, ARG1 // to quiet ARG1
3005 // RESULT = MAXNUM_IEEE ARG0, ARG1 // or MINNUM_IEEE
3006 // FIXME: In LangRef, we claimed FMAXNUM has the same behaviour as
3007 // FMAXNUM_IEEE, while the backend hasn't migrated the code yet.
3008 // Finally, we will remove FMAXNUM_IEEE and FMINNUM_IEEE.
3009 int IeeeISD =
3010 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
3011 if (TLI->isOperationLegal(IeeeISD, LT.second)) {
3012 IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
3013 RetTy, Tys[0]);
3014 InstructionCost FCanonicalizeCost =
3015 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
3016 return LT.first + FCanonicalizeCost * 2;
3017 }
3018 break;
3019 }
3020 case Intrinsic::clmul: {
3021 // This cost model should match the expansion in
3022 // TargetLowering::expandCLMUL.
3023 InstructionCost PerBitCostMul =
3024 thisT()->getArithmeticInstrCost(Instruction::And, RetTy, CostKind) +
3025 thisT()->getArithmeticInstrCost(Instruction::Mul, RetTy, CostKind) +
3026 thisT()->getArithmeticInstrCost(Instruction::Xor, RetTy, CostKind);
3027 InstructionCost PerBitCostBittest =
3028 thisT()->getArithmeticInstrCost(Instruction::And, RetTy, CostKind) +
3029 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, RetTy,
3030 CmpInst::BAD_ICMP_PREDICATE, CostKind) +
3031 thisT()->getCmpSelInstrCost(Instruction::ICmp, RetTy, RetTy,
3032 CmpInst::ICMP_EQ, CostKind);
3033 InstructionCost PerBitCost = std::min(PerBitCostMul, PerBitCostBittest);
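// E.g. an i8 clmul is costed as 8 * PerBitCost, using the cheaper of the
// mul-based and bittest-based single-bit expansions.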
3034 return RetTy->getScalarSizeInBits() * PerBitCost;
3035 }
3036 default:
3037 break;
3038 }
3039
3040 // Else, assume that we need to scalarize this intrinsic. For math builtins
3041 // this will emit a costly libcall, adding call overhead and spills. Make it
3042 // very expensive.
3043 if (isVectorizedTy(RetTy)) {
3044 ArrayRef<Type *> RetVTys = getContainedTypes(RetTy);
3045
3046 // Scalable vectors cannot be scalarized, so return Invalid.
3047 if (any_of(concat<Type *const>(RetVTys, Tys),
3048 [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
3049 return InstructionCost::getInvalid();
3050
3051 InstructionCost ScalarizationCost = ScalarizationCostPassed;
3052 if (!SkipScalarizationCost) {
3053 ScalarizationCost = 0;
3054 for (Type *RetVTy : RetVTys) {
3055 ScalarizationCost += getScalarizationOverhead(
3056 cast<VectorType>(RetVTy), /*Insert=*/true,
3057 /*Extract=*/false, CostKind);
3058 }
3059 }
3060
3061 unsigned ScalarCalls = getVectorizedTypeVF(RetTy).getFixedValue();
3062 SmallVector<Type *, 4> ScalarTys;
3063 for (Type *Ty : Tys) {
3064 if (Ty->isVectorTy())
3065 Ty = Ty->getScalarType();
3066 ScalarTys.push_back(Ty);
3067 }
3068 IntrinsicCostAttributes Attrs(IID, toScalarizedTy(RetTy), ScalarTys, FMF);
3069 InstructionCost ScalarCost =
3070 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
3071 for (Type *Ty : Tys) {
3072 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
3073 if (!ICA.skipScalarizationCost())
3074 ScalarizationCost += getScalarizationOverhead(
3075 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
3076 ScalarCalls = std::max(ScalarCalls,
3077 cast<FixedVectorType>(VTy)->getNumElements());
3078 }
3079 }
3080 return ScalarCalls * ScalarCost + ScalarizationCost;
3081 }
3082
3083 // This is going to be turned into a library call, make it expensive.
3084 return SingleCallCost;
3085 }
3086
3087 /// Get memory intrinsic cost based on arguments.
3088 InstructionCost
3089 getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
3090 TTI::TargetCostKind CostKind) const override {
3091 unsigned Id = MICA.getID();
3092 Type *DataTy = MICA.getDataType();
3093 bool VariableMask = MICA.getVariableMask();
3094 Align Alignment = MICA.getAlignment();
3095
3096 switch (Id) {
3097 case Intrinsic::experimental_vp_strided_load:
3098 case Intrinsic::experimental_vp_strided_store: {
3099 unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
3100 ? Instruction::Load
3101 : Instruction::Store;
3102 // For a target without strided memory operations (or for an illegal
3103 // operation type on one which does), assume we lower to a gather/scatter
3104 // operation. (Which may in turn be scalarized.)
3105 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3106 VariableMask, true, CostKind);
3107 }
3108 case Intrinsic::masked_scatter:
3109 case Intrinsic::masked_gather:
3110 case Intrinsic::vp_scatter:
3111 case Intrinsic::vp_gather: {
3112 unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
3113 MICA.getID() == Intrinsic::vp_gather)
3114 ? Instruction::Load
3115 : Instruction::Store;
3116
3117 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3118 VariableMask, true, CostKind);
3119 }
3120 case Intrinsic::vp_load:
3121 case Intrinsic::vp_store:
3123 case Intrinsic::masked_load:
3124 case Intrinsic::masked_store: {
3125 unsigned Opcode = (Id == Intrinsic::masked_load || Id == Intrinsic::vp_load)
3126 ? Instruction::Load : Instruction::Store;
3127 // TODO: Pass on AddressSpace when we have test coverage.
3128 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
3129 CostKind);
3130 }
3131 case Intrinsic::masked_compressstore:
3132 case Intrinsic::masked_expandload: {
3133 unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
3134 ? Instruction::Load
3135 : Instruction::Store;
3136 // Treat expand load/compress store as gather/scatter operation.
3137 // TODO: implement more precise cost estimation for these intrinsics.
3138 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3139 VariableMask,
3140 /*IsGatherScatter*/ true, CostKind);
3141 }
3142 case Intrinsic::vp_load_ff:
3144 default:
3145 llvm_unreachable("unexpected intrinsic");
3146 }
3147 }
3148
3149 /// Compute a cost of the given call instruction.
3150 ///
3151 /// Compute the cost of calling function F with return type RetTy and
3152 /// argument types Tys. F might be nullptr, in this case the cost of an
3153 /// arbitrary call with the specified signature will be returned.
3154 /// This is used, for instance, when we estimate call of a vector
3155 /// counterpart of the given function.
3156 /// \param F Called function, might be nullptr.
3157 /// \param RetTy Return value types.
3158 /// \param Tys Argument types.
3159 /// \returns The cost of Call instruction.
3162 TTI::TargetCostKind CostKind) const override {
3163 return 10;
3164 }
3165
3166 unsigned getNumberOfParts(Type *Tp) const override {
3167 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
3168 if (!LT.first.isValid())
3169 return 0;
3170 // Try to find actual number of parts for non-power-of-2 elements as
3171 // ceil(num-of-elements/num-of-subtype-elements).
3172 if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
3173 FTp && LT.second.isFixedLengthVector() &&
3174 !has_single_bit(FTp->getNumElements())) {
3175 if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
3176 EVT(LT.second).getTypeForEVT(Tp->getContext()));
3177 SubTp && SubTp->getElementType() == FTp->getElementType())
3178 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3179 }
3180 return LT.first.getValue();
3181 }
3182
3183 InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
3184 const SCEV *,
3185 TTI::TargetCostKind) const override {
3186 return 0;
3187 }
3188
3189 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
3190 /// We're assuming that reduction operations are performed in the following way:
3191 ///
3192 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
3193 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
3194 /// \----------------v-------------/ \----------v------------/
3195 /// n/2 elements n/2 elements
3196 /// %red1 = op <n x t> %val, <n x t> %val1
3197 /// After this operation we have a vector %red1 where only the first n/2
3198 /// elements are meaningful, the second n/2 elements are undefined and can be
3199 /// dropped. All other operations are actually working with the vector of
3200 /// length n/2, not n, though the real vector length is still n.
3201 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
3202 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
3203 /// \----------------v-------------/ \----------v------------/
3204 /// n/4 elements 3*n/4 elements
3205 /// %red2 = op <n x t> %red1, <n x t> %val2 - working with the vector of
3206 /// length n/2, the resulting vector has length n/4 etc.
3207 ///
3208 /// The cost model should take into account that the actual length of the
3209 /// vector is reduced on each iteration.
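/// For example, an 8-lane reduction performs log2(8) = 3 shuffle + op
/// levels before the final extractelement of lane 0.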
3210 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
3211 TTI::TargetCostKind CostKind) const {
3212 // Targets must implement a default value for the scalable case, since
3213 // we don't know how many lanes the vector has.
3214 if (isa<ScalableVectorType>(Ty))
3215 return InstructionCost::getInvalid();
3216
3217 Type *ScalarTy = Ty->getElementType();
3218 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3219 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3220 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
3221 NumVecElts >= 2) {
3222 // Or reduction for i1 is represented as:
3223 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3224 // %res = cmp ne iReduxWidth %val, 0
3225 // And reduction for i1 is represented as:
3226 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3227 // %res = cmp eq iReduxWidth %val, -1 (all ones)
3228 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
3229 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3230 TTI::CastContextHint::None, CostKind) +
3231 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3232 CmpInst::makeCmpResultType(ValTy),
3233 CmpInst::ICMP_EQ, CostKind);
3234 }
3235 unsigned NumReduxLevels = Log2_32(NumVecElts);
3236 InstructionCost ArithCost = 0;
3237 InstructionCost ShuffleCost = 0;
3238 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3239 unsigned LongVectorCount = 0;
3240 unsigned MVTLen =
3241 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3242 while (NumVecElts > MVTLen) {
3243 NumVecElts /= 2;
3244 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3245 ShuffleCost += thisT()->getShuffleCost(
3246 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3247 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3248 Ty = SubTy;
3249 ++LongVectorCount;
3250 }
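// E.g. reducing <16 x float> on a target whose widest legal type holds 4
// floats performs two extract-subvector + arith steps (16 -> 8 -> 4) here,
// leaving log2(4) = 2 shuffle + arith levels, costed below on the legal type.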
3251
3252 NumReduxLevels -= LongVectorCount;
3253
3254 // The minimal length of the vector is limited by the real length of vector
3255 // operations performed on the current platform. That's why several final
3256 // reduction operations are performed on the vectors with the same
3257 // architecture-dependent length.
3258
3259 // By default reductions need one shuffle per reduction level.
3260 ShuffleCost +=
3261 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3262 Ty, {}, CostKind, 0, Ty);
3263 ArithCost +=
3264 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3265 return ShuffleCost + ArithCost +
3266 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3267 CostKind, 0, nullptr, nullptr);
3268 }
3269
3270 /// Try to calculate the cost of performing strict (in-order) reductions,
3271 /// which involves doing a sequence of floating point additions in lane
3272 /// order, starting with an initial value. For example, consider a scalar
3273 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
3274 ///
3275 /// Vector = <float %v0, float %v1, float %v2, float %v3>
3276 ///
3277 /// %add1 = %InitVal + %v0
3278 /// %add2 = %add1 + %v1
3279 /// %add3 = %add2 + %v2
3280 /// %add4 = %add3 + %v3
3281 ///
3282 /// As a simple estimate we can say the cost of such a reduction is 4 times
3283 /// the cost of a scalar FP addition. We can only estimate the costs for
3284 /// fixed-width vectors here because for scalable vectors we do not know the
3285 /// runtime number of operations.
3286 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
3287 TTI::TargetCostKind CostKind) const {
3288 // Targets must implement a default value for the scalable case, since
3289 // we don't know how many lanes the vector has.
3290 if (isa<ScalableVectorType>(Ty))
3291 return InstructionCost::getInvalid();
3292
3293 auto *VTy = cast<FixedVectorType>(Ty);
3294 InstructionCost ExtractCost = getScalarizationOverhead(
3295 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
3296 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
3297 Opcode, VTy->getElementType(), CostKind);
3298 ArithCost *= VTy->getNumElements();
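// E.g. an in-order fadd reduction of <4 x float> is costed as four lane
// extracts plus four scalar fadds.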
3299
3300 return ExtractCost + ArithCost;
3301 }
3302
3303 InstructionCost
3304 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
3305 std::optional<FastMathFlags> FMF,
3306 TTI::TargetCostKind CostKind) const override {
3307 assert(Ty && "Unknown reduction vector type");
3308 if (TTI::requiresOrderedReduction(FMF))
3309 return getOrderedReductionCost(Opcode, Ty, CostKind);
3310 return getTreeReductionCost(Opcode, Ty, CostKind);
3311 }
3312
3313 /// Try to calculate op costs for min/max reduction operations.
3314 /// \param CondTy Conditional type for the Select instruction.
3315 InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
3316 FastMathFlags FMF,
3317 TTI::TargetCostKind CostKind) const override {
3318 // Targets must implement a default value for the scalable case, since
3319 // we don't know how many lanes the vector has.
3320 if (isa<ScalableVectorType>(Ty))
3321 return InstructionCost::getInvalid();
3322
3323 Type *ScalarTy = Ty->getElementType();
3324 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3325 unsigned NumReduxLevels = Log2_32(NumVecElts);
3326 InstructionCost MinMaxCost = 0;
3327 InstructionCost ShuffleCost = 0;
3328 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3329 unsigned LongVectorCount = 0;
3330 unsigned MVTLen =
3331 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3332 while (NumVecElts > MVTLen) {
3333 NumVecElts /= 2;
3334 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3335
3336 ShuffleCost += thisT()->getShuffleCost(
3337 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3338
3339 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
3340 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
3341 Ty = SubTy;
3342 ++LongVectorCount;
3343 }
3344
3345 NumReduxLevels -= LongVectorCount;
3346
3347 // The minimal length of the vector is limited by the real length of vector
3348 // operations performed on the current platform. That's why several final
3349 // reduction operations are performed on the vectors with the same
3350 // architecture-dependent length.
3351 ShuffleCost +=
3352 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3353 Ty, {}, CostKind, 0, Ty);
3354 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
3355 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
3356 // The last min/max should be in vector registers and we counted it above.
3357 // So we just need a single extractelement.
3358 return ShuffleCost + MinMaxCost +
3359 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3360 CostKind, 0, nullptr, nullptr);
3361 }
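// This mirrors getTreeReductionCost above with each per-level arithmetic op
// replaced by a two-operand min/max intrinsic. Illustrative instance: an
// smax reduction of <8 x i32> on a hypothetical target with legal <4 x i32>
// (MVTLen == 4) pays one SK_ExtractSubvector shuffle plus one <4 x i32> smax
// for the 8 -> 4 split, then Log2_32(8) - 1 = 2 permute levels with one smax
// each, plus the final extractelement.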
3362
3363 InstructionCost
3364 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
3365 VectorType *Ty, std::optional<FastMathFlags> FMF,
3366 TTI::TargetCostKind CostKind) const override {
3367 if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
3368 FTy && IsUnsigned && Opcode == Instruction::Add &&
3369 FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
3370 // Represent vector_reduce_add(ZExt(<n x i1>)) as
3371 // ZExtOrTrunc(ctpop(bitcast <n x i1> to iN)).
3372 auto *IntTy =
3373 IntegerType::get(ResTy->getContext(), FTy->getNumElements());
3374 IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy},
3375 FMF ? *FMF : FastMathFlags());
3376 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3377 TTI::CastContextHint::None, CostKind) +
3378 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3379 }
3380 // Without any native support, this is equivalent to the cost of
3381 // vecreduce.opcode(ext(Ty A)).
3382 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3383 InstructionCost RedCost =
3384 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3385 InstructionCost ExtCost = thisT()->getCastInstrCost(
3386 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3387 TTI::CastContextHint::None, CostKind);
3388
3389 return RedCost + ExtCost;
3390 }
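// Concrete shape of the i1 special case above, e.g. summing a zero-extended
// <8 x i1> %m (IntTy is then i8, one bit per lane):
//
//   %bits = bitcast <8 x i1> %m to i8
//   %cnt  = call i8 @llvm.ctpop.i8(i8 %bits)
//   ... zext/trunc %cnt to the result type ...
//
// All other cases fall through to the extend-then-reduce decomposition.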
3391
3392 InstructionCost
3393 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
3394 VectorType *Ty,
3395 TTI::TargetCostKind CostKind) const override {
3396 // Without any native support, this is equivalent to the cost of
3397 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
3398 // vecreduce.add(mul(A, B)).
3399 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3400 "The reduction opcode is expected to be Add or Sub.");
3401 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3402 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
3403 RedOpcode, ExtTy, std::nullopt, CostKind);
3404 InstructionCost ExtCost = thisT()->getCastInstrCost(
3405 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3406 TTI::CastContextHint::None, CostKind);
3407
3408 InstructionCost MulCost =
3409 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3410
3411 return RedCost + MulCost + 2 * ExtCost;
3412 }
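// Net effect: with no native multiply-accumulate reduction, the cost is the
// sum of its parts,
//   cost ~= RedCost(RedOpcode, ExtTy) + MulCost(ExtTy) + 2 * ExtCost,
// the factor of 2 accounting for extending both multiplicands A and B.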
3413
3414 InstructionCost getVectorSplitCost() const { return 1; }
3415
3416 /// @}
3417};
3418
3419/// Concrete BasicTTIImpl that can be used if no further customization
3420/// is needed.
3421class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
3422 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
3423
3424 friend class BasicTTIImplBase<BasicTTIImpl>;
3425
3426 const TargetSubtargetInfo *ST;
3427 const TargetLoweringBase *TLI;
3428
3429 const TargetSubtargetInfo *getST() const { return ST; }
3430 const TargetLoweringBase *getTLI() const { return TLI; }
3431
3432public:
3433 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
3434};
3435
3436} // end namespace llvm
3437
3438#endif // LLVM_CODEGEN_BASICTTIIMPL_H