BasicTTIImpl.h
1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
54#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <limits>
58#include <optional>
59#include <utility>
60
61namespace llvm {
62
63class Function;
64class GlobalValue;
65class LLVMContext;
66class ScalarEvolution;
67class SCEV;
68class TargetMachine;
69
70extern cl::opt<unsigned> PartialUnrollingThreshold;
71
72/// Base class which can be used to help build a TTI implementation.
73///
74/// This class provides as much implementation of the TTI interface as is
75/// possible using the target independent parts of the code generator.
76///
77/// In order to subclass it, your class must implement a getST() method to
78/// return the subtarget, and a getTLI() method to return the target lowering.
79/// We need these methods implemented in the derived class so that this class
80/// doesn't have to duplicate storage for them.
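///
/// For illustration only, a minimal subclass could look like the sketch below
/// (MyTargetTTIImpl, MyTargetSubtarget and MyTargetLowering are hypothetical
/// names, not part of LLVM):
/// \code
///   class MyTargetTTIImpl final : public BasicTTIImplBase<MyTargetTTIImpl> {
///     using BaseT = BasicTTIImplBase<MyTargetTTIImpl>;
///     friend BaseT; // Lets the base class reach the private accessors below.
///     const MyTargetSubtarget *ST;
///     const MyTargetLowering *TLI;
///     const MyTargetSubtarget *getST() const { return ST; }
///     const MyTargetLowering *getTLI() const { return TLI; }
///   public:
///     explicit MyTargetTTIImpl(const TargetMachine *TM, const Function &F);
///   };
/// \endcode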
81template <typename T>
82class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
83private:
84 using BaseT = TargetTransformInfoImplCRTPBase<T>;
85 using TTI = TargetTransformInfo;
86
87 /// Helper function to access this as a T.
88 const T *thisT() const { return static_cast<const T *>(this); }
89
90 /// Estimate a cost of Broadcast as an extract and sequence of insert
91 /// operations.
93 getBroadcastShuffleOverhead(FixedVectorType *VTy,
96 // Broadcast cost is equal to the cost of extracting the zero'th element
97 // plus the cost of inserting it into every element of the result vector.
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
99 CostKind, 0, nullptr, nullptr);
100
101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
103 CostKind, i, nullptr, nullptr);
104 }
105 return Cost;
106 }
107
108 /// Estimate a cost of shuffle as a sequence of extract and insert
109 /// operations.
111 getPermuteShuffleOverhead(FixedVectorType *VTy,
114 // Shuffle cost is equal to the cost of extracting elements from its
115 // arguments plus the cost of inserting them into the result vector.
116
117 // e.g. <4 x float> has a mask of <0,5,2,7>, i.e. we need to extract from
118 // index 0 of the first vector, index 1 of the second vector, index 2 of
119 // the first vector and finally index 3 of the second vector, and insert
120 // them at indices <0,1,2,3> of the result vector.
121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
123 CostKind, i, nullptr, nullptr);
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
125 CostKind, i, nullptr, nullptr);
126 }
127 return Cost;
128 }
129
130 /// Estimate a cost of subvector extraction as a sequence of extract and
131 /// insert operations.
132 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
134 int Index,
135 FixedVectorType *SubVTy) const {
136 assert(VTy && SubVTy &&
137 "Can only extract subvectors from vectors");
138 int NumSubElts = SubVTy->getNumElements();
140 (Index + NumSubElts) <=
142 "SK_ExtractSubvector index out of range");
143
145 // Subvector extraction cost is equal to the cost of extracting elements
146 // from the source type plus the cost of inserting them into the result
147 // vector type.
148 for (int i = 0; i != NumSubElts; ++i) {
149 Cost +=
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
153 CostKind, i, nullptr, nullptr);
154 }
155 return Cost;
156 }
157
158 /// Estimate a cost of subvector insertion as a sequence of extract and
159 /// insert operations.
160 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
162 int Index,
163 FixedVectorType *SubVTy) const {
164 assert(VTy && SubVTy &&
165 "Can only insert subvectors into vectors");
166 int NumSubElts = SubVTy->getNumElements();
168 (Index + NumSubElts) <=
170 "SK_InsertSubvector index out of range");
171
173 // Subvector insertion cost is equal to the cost of extracting elements
174 // from the subvector type plus the cost of inserting them into the result
175 // vector type.
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
178 CostKind, i, nullptr, nullptr);
179 Cost +=
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
182 }
183 return Cost;
184 }
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetSubtargetInfo *getST() const {
188 return static_cast<const T *>(this)->getST();
189 }
190
191 /// Local query method delegates up to T which *must* implement this!
192 const TargetLoweringBase *getTLI() const {
193 return static_cast<const T *>(this)->getTLI();
194 }
195
196 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
197 switch (M) {
199 return ISD::UNINDEXED;
200 case TTI::MIM_PreInc:
201 return ISD::PRE_INC;
202 case TTI::MIM_PreDec:
203 return ISD::PRE_DEC;
204 case TTI::MIM_PostInc:
205 return ISD::POST_INC;
206 case TTI::MIM_PostDec:
207 return ISD::POST_DEC;
208 }
209 llvm_unreachable("Unexpected MemIndexedMode");
210 }
211
212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
213 Align Alignment,
214 bool VariableMask,
215 bool IsGatherScatter,
217 unsigned AddressSpace = 0) const {
218 // We cannot scalarize scalable vectors, so return Invalid.
219 if (isa<ScalableVectorType>(DataTy))
221
222 auto *VT = cast<FixedVectorType>(DataTy);
223 unsigned VF = VT->getNumElements();
224
225 // Assume the target does not have support for gather/scatter operations
226 // and provide a rough estimate.
227 //
228 // First, compute the cost of the individual memory operations.
229 InstructionCost AddrExtractCost =
230 IsGatherScatter ? getScalarizationOverhead(
232 PointerType::get(VT->getContext(), 0), VF),
233 /*Insert=*/false, /*Extract=*/true, CostKind)
234 : 0;
235
236 // The cost of the scalar loads/stores.
237 InstructionCost MemoryOpCost =
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
240
241 // Next, compute the cost of packing the result in a vector.
242 InstructionCost PackingCost =
243 getScalarizationOverhead(VT, Opcode != Instruction::Store,
244 Opcode == Instruction::Store, CostKind);
245
246 InstructionCost ConditionalCost = 0;
247 if (VariableMask) {
248 // Compute the cost of conditionally executing the memory operations with
249 // variable masks. This includes extracting the individual conditions, the
250 // branches, and the PHIs to combine the results.
251 // NOTE: Estimating the cost of conditionally executing the memory
252 // operations accurately is quite difficult and the current solution
253 // provides a very rough estimate only.
254 ConditionalCost =
257 /*Insert=*/false, /*Extract=*/true, CostKind) +
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
260 }
261
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
263 }
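  // As a rough worked example of the decomposition above (using this base
  // class's generic estimates, not any particular target's numbers), a masked
  // gather of <4 x i32> with a variable mask is modeled as:
  //   4 pointer extracts                      (AddrExtractCost)
  // + 4 scalar i32 loads                      (MemoryOpCost)
  // + 4 result-element inserts                (PackingCost)
  // + 4 * (mask-bit extract + br + phi)       (ConditionalCost)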
264
265 /// Checks if the provided mask \p Mask is a splat mask, i.e. it contains
266 /// only -1 or the same non -1 index value, and that index value appears at
267 /// least twice. So, mask <0, -1, -1, -1> is not considered a splat (it is
268 /// just an identity), same for <-1, 0, -1, -1> (just a slide), while
269 /// <2, -1, 2, -1> is a splat with \p Index = 2.
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
271 // Check that the broadcast index occurs at least twice.
272 bool IsCompared = false;
273 if (int SplatIdx = PoisonMaskElem;
274 all_of(enumerate(Mask), [&](const auto &P) {
275 if (P.value() == PoisonMaskElem)
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
278 return false;
279 if (SplatIdx == PoisonMaskElem) {
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
282 }
283 IsCompared = true;
284 return SplatIdx == P.value();
285 })) {
286 Index = SplatIdx;
287 return true;
288 }
289 return false;
290 }
291
292 /// Several intrinsics that return structs (including llvm.sincos[pi] and
293 /// llvm.modf) can be lowered to a vector library call (for certain VFs). The
294 /// vector library functions correspond to the scalar calls (e.g. sincos or
295 /// modf), which, unlike the intrinsics, return values via output pointers. This
296 /// helper checks if a vector call exists for the given intrinsic, and returns
297 /// the cost, which includes the cost of the mask (if required), and the loads
298 /// for values returned via output pointers. \p LC is the scalar libcall and
299 /// \p CallRetElementIndex (optional) is the struct element which is mapped to
300 /// the call return value. If std::nullopt is returned, then no vector library
301 /// call is available, so the intrinsic should be assigned the default cost
302 /// (e.g. scalarization).
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
305 std::optional<unsigned> CallRetElementIndex = {}) const {
306 Type *RetTy = ICA.getReturnType();
307 // Vector variants of the intrinsic can be mapped to a vector library call.
308 auto const *LibInfo = ICA.getLibInfo();
309 if (!LibInfo || !isa<StructType>(RetTy) ||
311 return std::nullopt;
312
313 Type *Ty = getContainedTypes(RetTy).front();
314 EVT VT = getTLI()->getValueType(DL, Ty);
315
316 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
317
318 switch (ICA.getID()) {
319 case Intrinsic::modf:
320 LC = RTLIB::getMODF(VT);
321 break;
322 case Intrinsic::sincospi:
323 LC = RTLIB::getSINCOSPI(VT);
324 break;
325 case Intrinsic::sincos:
326 LC = RTLIB::getSINCOS(VT);
327 break;
328 default:
329 return std::nullopt;
330 }
331
332 // Find associated libcall.
333 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
334 if (LibcallImpl == RTLIB::Unsupported)
335 return std::nullopt;
336
337 LLVMContext &Ctx = RetTy->getContext();
338
339 // Cost the call + mask.
340 auto Cost =
341 thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
342
345 auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
346 Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
347 VecTy, {}, CostKind, 0, nullptr, {});
348 }
349
350 // Lowering to a library call (with output pointers) may require us to emit
351 // reloads for the results.
352 for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
353 if (Idx == CallRetElementIndex)
354 continue;
355 Cost += thisT()->getMemoryOpCost(
356 Instruction::Load, VectorTy,
357 thisT()->getDataLayout().getABITypeAlign(VectorTy), 0, CostKind);
358 }
359 return Cost;
360 }
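  // Illustrative example of the costing above: for a masked llvm.sincos.v4f32
  // that maps to a vector sincos library routine returning its results via
  // output pointers, the estimate is the call cost, plus a broadcast of the
  // <4 x i1> mask for a masked variant, plus one <4 x float> reload for each
  // struct element that is not covered by CallRetElementIndex.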
361
362 /// Filter out constant and duplicated entries in \p Ops and return a vector
363 /// containing the types from \p Tys corresponding to the remaining operands.
365 filterConstantAndDuplicatedOperands(ArrayRef<const Value *> Ops,
366 ArrayRef<Type *> Tys) {
367 SmallPtrSet<const Value *, 4> UniqueOperands;
368 SmallVector<Type *, 4> FilteredTys;
369 for (const auto &[Op, Ty] : zip_equal(Ops, Tys)) {
370 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
371 continue;
372 FilteredTys.push_back(Ty);
373 }
374 return FilteredTys;
375 }
376
377protected:
378 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
379 : BaseT(DL) {}
380 ~BasicTTIImplBase() override = default;
381
383
384public:
385 /// \name Scalar TTI Implementations
386 /// @{
388 unsigned AddressSpace, Align Alignment,
389 unsigned *Fast) const override {
390 EVT E = EVT::getIntegerVT(Context, BitWidth);
391 return getTLI()->allowsMisalignedMemoryAccesses(
393 }
394
395 bool areInlineCompatible(const Function *Caller,
396 const Function *Callee) const override {
397 const TargetMachine &TM = getTLI()->getTargetMachine();
398
399 const FeatureBitset &CallerBits =
400 TM.getSubtargetImpl(*Caller)->getFeatureBits();
401 const FeatureBitset &CalleeBits =
402 TM.getSubtargetImpl(*Callee)->getFeatureBits();
403
404 // Inline a callee if its target-features are a subset of the caller's
405 // target-features.
406 return (CallerBits & CalleeBits) == CalleeBits;
407 }
408
409 bool hasBranchDivergence(const Function *F = nullptr) const override {
410 return false;
411 }
412
413 bool isSourceOfDivergence(const Value *V) const override { return false; }
414
415 bool isAlwaysUniform(const Value *V) const override { return false; }
416
417 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
418 return false;
419 }
420
421 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
422 return true;
423 }
424
425 unsigned getFlatAddressSpace() const override {
426 // Return an invalid address space.
427 return -1;
428 }
429
431 Intrinsic::ID IID) const override {
432 return false;
433 }
434
435 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
436 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
437 }
438
439 unsigned getAssumedAddrSpace(const Value *V) const override {
440 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
441 }
442
443 bool isSingleThreaded() const override {
444 return getTLI()->getTargetMachine().Options.ThreadModel ==
446 }
447
448 std::pair<const Value *, unsigned>
449 getPredicatedAddrSpace(const Value *V) const override {
450 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
451 }
452
454 Value *NewV) const override {
455 return nullptr;
456 }
457
458 bool isLegalAddImmediate(int64_t imm) const override {
459 return getTLI()->isLegalAddImmediate(imm);
460 }
461
462 bool isLegalAddScalableImmediate(int64_t Imm) const override {
463 return getTLI()->isLegalAddScalableImmediate(Imm);
464 }
465
466 bool isLegalICmpImmediate(int64_t imm) const override {
467 return getTLI()->isLegalICmpImmediate(imm);
468 }
469
470 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
471 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
472 Instruction *I = nullptr,
473 int64_t ScalableOffset = 0) const override {
475 AM.BaseGV = BaseGV;
476 AM.BaseOffs = BaseOffset;
477 AM.HasBaseReg = HasBaseReg;
478 AM.Scale = Scale;
479 AM.ScalableOffset = ScalableOffset;
480 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
481 }
482
483 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
484 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
485 }
486
487 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
488 Type *ScalarValTy) const override {
489 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
490 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
491 EVT VT = getTLI()->getValueType(DL, SrcTy);
492 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
493 getTLI()->isOperationCustom(ISD::STORE, VT))
494 return true;
495
496 EVT ValVT =
497 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
498 EVT LegalizedVT =
499 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
500 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
501 };
502 while (VF > 2 && IsSupportedByTarget(VF))
503 VF /= 2;
504 return VF;
505 }
506
507 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
508 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
509 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
510 }
511
512 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
513 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
514 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
515 }
516
518 const TTI::LSRCost &C2) const override {
520 }
521
525
529
533
535 StackOffset BaseOffset, bool HasBaseReg,
536 int64_t Scale,
537 unsigned AddrSpace) const override {
539 AM.BaseGV = BaseGV;
540 AM.BaseOffs = BaseOffset.getFixed();
541 AM.HasBaseReg = HasBaseReg;
542 AM.Scale = Scale;
543 AM.ScalableOffset = BaseOffset.getScalable();
544 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
545 return 0;
547 }
548
549 bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
550 return getTLI()->isTruncateFree(Ty1, Ty2);
551 }
552
553 bool isProfitableToHoist(Instruction *I) const override {
554 return getTLI()->isProfitableToHoist(I);
555 }
556
557 bool useAA() const override { return getST()->useAA(); }
558
559 bool isTypeLegal(Type *Ty) const override {
560 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
561 return getTLI()->isTypeLegal(VT);
562 }
563
564 unsigned getRegUsageForType(Type *Ty) const override {
565 EVT ETy = getTLI()->getValueType(DL, Ty);
566 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
567 }
568
569 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
570 ArrayRef<const Value *> Operands, Type *AccessType,
571 TTI::TargetCostKind CostKind) const override {
572 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
573 }
574
576 const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI,
577 BlockFrequencyInfo *BFI) const override {
578 /// Try to find the estimated number of clusters. Note that the number of
579 /// clusters identified in this function could be different from the actual
580 /// numbers found in lowering. This function ignores switches that are
581 /// lowered with a mix of jump table / bit test / BTree. This function was
582 /// initially intended to be used when estimating the cost of a switch in
583 /// the inline cost heuristic, but it's a generic cost model to be used in
584 /// other places (e.g., in loop unrolling).
585 unsigned N = SI.getNumCases();
586 const TargetLoweringBase *TLI = getTLI();
587 const DataLayout &DL = this->getDataLayout();
588
589 JumpTableSize = 0;
590 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
591
592 // Early exit if neither a jump table nor a bit test is viable.
593 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
594 return N;
595
596 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
597 APInt MinCaseVal = MaxCaseVal;
598 for (auto CI : SI.cases()) {
599 const APInt &CaseVal = CI.getCaseValue()->getValue();
600 if (CaseVal.sgt(MaxCaseVal))
601 MaxCaseVal = CaseVal;
602 if (CaseVal.slt(MinCaseVal))
603 MinCaseVal = CaseVal;
604 }
605
606 // Check if suitable for a bit test
607 if (N <= DL.getIndexSizeInBits(0u)) {
609 for (auto I : SI.cases()) {
610 const BasicBlock *BB = I.getCaseSuccessor();
611 ++DestMap[BB];
612 }
613
614 if (TLI->isSuitableForBitTests(DestMap, MinCaseVal, MaxCaseVal, DL))
615 return 1;
616 }
617
618 // Check if suitable for a jump table.
619 if (IsJTAllowed) {
620 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
621 return N;
623 (MaxCaseVal - MinCaseVal)
624 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
625 // Check whether a range of clusters is dense enough for a jump table
626 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
627 JumpTableSize = Range;
628 return 1;
629 }
630 }
631 return N;
632 }
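  // For example (illustrative, assuming the target allows jump tables and
  // isSuitableForJumpTable succeeds): a switch over the dense cases 0..9
  // forms a single cluster, so this returns 1 with JumpTableSize = 10,
  // whereas 10 widely scattered cases that suit neither a bit test nor a
  // jump table are reported as 10 separate clusters.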
633
634 bool shouldBuildLookupTables() const override {
635 const TargetLoweringBase *TLI = getTLI();
636 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
637 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
638 }
639
640 bool shouldBuildRelLookupTables() const override {
641 const TargetMachine &TM = getTLI()->getTargetMachine();
642 // If non-PIC mode, do not generate a relative lookup table.
643 if (!TM.isPositionIndependent())
644 return false;
645
646 /// Relative lookup table entries consist of 32-bit offsets.
647 /// Do not generate relative lookup tables for large code models
648 /// on 64-bit architectures where 32-bit offsets might not be enough.
649 if (TM.getCodeModel() == CodeModel::Medium ||
651 return false;
652
653 const Triple &TargetTriple = TM.getTargetTriple();
654 if (!TargetTriple.isArch64Bit())
655 return false;
656
657 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
658 // there.
659 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
660 return false;
661
662 return true;
663 }
664
665 bool haveFastSqrt(Type *Ty) const override {
666 const TargetLoweringBase *TLI = getTLI();
667 EVT VT = TLI->getValueType(DL, Ty);
668 return TLI->isTypeLegal(VT) &&
669 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
670 }
671
672 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override { return true; }
673
674 InstructionCost getFPOpCost(Type *Ty) const override {
675 // Check whether FADD is available, as a proxy for floating-point in
676 // general.
677 const TargetLoweringBase *TLI = getTLI();
678 EVT VT = TLI->getValueType(DL, Ty);
682 }
683
685 const Function &Fn) const override {
686 switch (Inst.getOpcode()) {
687 default:
688 break;
689 case Instruction::SDiv:
690 case Instruction::SRem:
691 case Instruction::UDiv:
692 case Instruction::URem: {
693 if (!isa<ConstantInt>(Inst.getOperand(1)))
694 return false;
695 EVT VT = getTLI()->getValueType(DL, Inst.getType());
696 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
697 }
698 };
699
700 return false;
701 }
702
703 unsigned getInliningThresholdMultiplier() const override { return 1; }
704 unsigned adjustInliningThreshold(const CallBase *CB) const override {
705 return 0;
706 }
707 unsigned getCallerAllocaCost(const CallBase *CB,
708 const AllocaInst *AI) const override {
709 return 0;
710 }
711
712 int getInlinerVectorBonusPercent() const override { return 150; }
713
716 OptimizationRemarkEmitter *ORE) const override {
717 // This unrolling functionality is target independent, but to provide some
718 // motivation for its intended use, for x86:
719
720 // According to the Intel 64 and IA-32 Architectures Optimization Reference
721 // Manual, Intel Core models and later have a loop stream detector (and
722 // associated uop queue) that can benefit from partial unrolling.
723 // The relevant requirements are:
724 // - The loop must have no more than 4 (8 for Nehalem and later) branches
725 // taken, and none of them may be calls.
726 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
727
728 // According to the Software Optimization Guide for AMD Family 15h
729 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
730 // and loop buffer which can benefit from partial unrolling.
731 // The relevant requirements are:
732 // - The loop must have fewer than 16 branches
733 // - The loop must have less than 40 uops in all executed loop branches
734
735 // The number of taken branches in a loop is hard to estimate here, and
736 // benchmarking has revealed that it is better not to be conservative when
737 // estimating the branch count. As a result, we'll ignore the branch limits
738 // until someone finds a case where it matters in practice.
739
740 unsigned MaxOps;
741 const TargetSubtargetInfo *ST = getST();
742 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
744 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
745 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
746 else
747 return;
748
749 // Scan the loop: don't unroll loops with calls.
750 for (BasicBlock *BB : L->blocks()) {
751 for (Instruction &I : *BB) {
752 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
753 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
754 if (!thisT()->isLoweredToCall(F))
755 continue;
756 }
757
758 if (ORE) {
759 ORE->emit([&]() {
760 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
761 L->getHeader())
762 << "advising against unrolling the loop because it "
763 "contains a "
764 << ore::NV("Call", &I);
765 });
766 }
767 return;
768 }
769 }
770 }
771
772 // Enable runtime and partial unrolling up to the specified size.
773 // Enable using trip count upper bound to unroll loops.
774 UP.Partial = UP.Runtime = UP.UpperBound = true;
775 UP.PartialThreshold = MaxOps;
776
777 // Avoid unrolling when optimizing for size.
778 UP.OptSizeThreshold = 0;
780
781 // Set the number of instructions optimized away when the "back edge"
782 // becomes a "fall through" to the default value of 2.
783 UP.BEInsns = 2;
784 }
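  // For example, on a subtarget whose scheduling model reports
  // LoopMicroOpBufferSize = 28 (and with PartialUnrollingThreshold left
  // unset), a call-free loop gets UP.Partial, UP.Runtime and UP.UpperBound
  // enabled with UP.PartialThreshold = 28.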
785
787 TTI::PeelingPreferences &PP) const override {
788 PP.PeelCount = 0;
789 PP.AllowPeeling = true;
790 PP.AllowLoopNestsPeeling = false;
791 PP.PeelProfiledIterations = true;
792 }
793
796 HardwareLoopInfo &HWLoopInfo) const override {
797 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
798 }
799
800 unsigned getEpilogueVectorizationMinVF() const override {
802 }
803
806 }
807
809 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override {
810 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
811 }
812
813 std::optional<Instruction *>
816 }
817
818 std::optional<Value *>
820 APInt DemandedMask, KnownBits &Known,
821 bool &KnownBitsComputed) const override {
822 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
823 KnownBitsComputed);
824 }
825
827 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
828 APInt &UndefElts2, APInt &UndefElts3,
829 std::function<void(Instruction *, unsigned, APInt, APInt &)>
830 SimplifyAndSetOp) const override {
832 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
833 SimplifyAndSetOp);
834 }
835
836 std::optional<unsigned>
838 return std::optional<unsigned>(
839 getST()->getCacheSize(static_cast<unsigned>(Level)));
840 }
841
842 std::optional<unsigned>
844 std::optional<unsigned> TargetResult =
845 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
846
847 if (TargetResult)
848 return TargetResult;
849
850 return BaseT::getCacheAssociativity(Level);
851 }
852
853 unsigned getCacheLineSize() const override {
854 return getST()->getCacheLineSize();
855 }
856
857 unsigned getPrefetchDistance() const override {
858 return getST()->getPrefetchDistance();
859 }
860
861 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
862 unsigned NumStridedMemAccesses,
863 unsigned NumPrefetches,
864 bool HasCall) const override {
865 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
866 NumPrefetches, HasCall);
867 }
868
869 unsigned getMaxPrefetchIterationsAhead() const override {
870 return getST()->getMaxPrefetchIterationsAhead();
871 }
872
873 bool enableWritePrefetching() const override {
874 return getST()->enableWritePrefetching();
875 }
876
877 bool shouldPrefetchAddressSpace(unsigned AS) const override {
878 return getST()->shouldPrefetchAddressSpace(AS);
879 }
880
881 /// @}
882
883 /// \name Vector TTI Implementations
884 /// @{
885
890
891 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
892 std::optional<unsigned> getVScaleForTuning() const override {
893 return std::nullopt;
894 }
895 bool isVScaleKnownToBeAPowerOfTwo() const override { return false; }
896
897 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
898 /// are set if the demanded result elements need to be inserted and/or
899 /// extracted from vectors.
901 VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
902 TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
903 ArrayRef<Value *> VL = {}) const override {
904 /// FIXME: a bitfield is not a reasonable abstraction for talking about
905 /// which elements are needed from a scalable vector
906 if (isa<ScalableVectorType>(InTy))
908 auto *Ty = cast<FixedVectorType>(InTy);
909
910 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
911 (VL.empty() || VL.size() == Ty->getNumElements()) &&
912 "Vector size mismatch");
913
915
916 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
917 if (!DemandedElts[i])
918 continue;
919 if (Insert) {
920 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
921 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
922 CostKind, i, nullptr, InsertedVal);
923 }
924 if (Extract)
925 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
926 CostKind, i, nullptr, nullptr);
927 }
928
929 return Cost;
930 }
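  // A small worked example: for <4 x float> with DemandedElts = 0b0101,
  // Insert = true and Extract = false, this sums the insertelement cost for
  // lanes 0 and 2 only; the undemanded lanes 1 and 3 contribute nothing.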
931
933 return false;
934 }
935
936 bool
938 unsigned ScalarOpdIdx) const override {
939 return false;
940 }
941
943 int OpdIdx) const override {
944 return OpdIdx == -1;
945 }
946
947 bool
949 int RetIdx) const override {
950 return RetIdx == 0;
951 }
952
953 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
955 bool Extract,
957 if (isa<ScalableVectorType>(InTy))
959 auto *Ty = cast<FixedVectorType>(InTy);
960
961 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
962 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
963 CostKind);
964 }
965
966 /// Estimate the overhead of scalarizing an instruction's
967 /// operands. The (potentially vector) types to use for each
968 /// argument are passed via Tys.
970 ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind) const override {
972 for (Type *Ty : Tys) {
973 // Disregard things like metadata arguments.
974 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
975 !Ty->isPtrOrPtrVectorTy())
976 continue;
977
978 if (auto *VecTy = dyn_cast<VectorType>(Ty))
979 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
980 /*Extract*/ true, CostKind);
981 }
982
983 return Cost;
984 }
985
986 /// Estimate the overhead of scalarizing the inputs and outputs of an
987 /// instruction, with return type RetTy and arguments Args of type Tys. If
988 /// Args are unknown (empty), then the cost associated with one argument is
989 /// added as a heuristic.
995 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
996 if (!Args.empty())
998 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
999 else
1000 // When no information on arguments is provided, we add the cost
1001 // associated with one argument as a heuristic.
1002 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
1003 /*Extract*/ true, CostKind);
1004
1005 return Cost;
1006 }
1007
1008 /// Estimate the cost of type-legalization and the legalized type.
1009 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
1010 LLVMContext &C = Ty->getContext();
1011 EVT MTy = getTLI()->getValueType(DL, Ty);
1012
1013 InstructionCost Cost = 1;
1014 // We keep legalizing the type until we find a legal kind. We assume that
1015 // the only operation that costs anything is the split. After splitting
1016 // we need to handle two types.
1017 while (true) {
1018 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
1019
1021 // Ensure we return a sensible simple VT here, since many callers of
1022 // this function require it.
1023 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
1024 return std::make_pair(InstructionCost::getInvalid(), VT);
1025 }
1026
1027 if (LK.first == TargetLoweringBase::TypeLegal)
1028 return std::make_pair(Cost, MTy.getSimpleVT());
1029
1030 if (LK.first == TargetLoweringBase::TypeSplitVector ||
1032 Cost *= 2;
1033
1034 // Do not loop with f128 type.
1035 if (MTy == LK.second)
1036 return std::make_pair(Cost, MTy.getSimpleVT());
1037
1038 // Keep legalizing the type.
1039 MTy = LK.second;
1040 }
1041 }
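  // Worked example (assuming a target whose widest legal vector register is
  // 128 bits): legalizing <16 x i32> splits v16i32 -> 2 x v8i32 -> 4 x v4i32,
  // doubling the cost at each split, so this returns {4, MVT::v4i32}.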
1042
1043 unsigned getMaxInterleaveFactor(ElementCount VF) const override { return 1; }
1044
1046 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1049 ArrayRef<const Value *> Args = {},
1050 const Instruction *CxtI = nullptr) const override {
1051 // Check if any of the operands are vector operands.
1052 const TargetLoweringBase *TLI = getTLI();
1053 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1054 assert(ISD && "Invalid opcode");
1055
1056 // TODO: Handle more cost kinds.
1058 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1059 Opd1Info, Opd2Info,
1060 Args, CxtI);
1061
1062 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1063
1064 bool IsFloat = Ty->isFPOrFPVectorTy();
1065 // Assume that floating point arithmetic operations cost twice as much as
1066 // integer operations.
1067 InstructionCost OpCost = (IsFloat ? 2 : 1);
1068
1069 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1070 // The operation is legal. Assume it costs 1.
1071 // TODO: Once we have extract/insert subvector cost we need to use them.
1072 return LT.first * OpCost;
1073 }
1074
1075 if (!TLI->isOperationExpand(ISD, LT.second)) {
1076 // If the operation is custom lowered, then assume that the code is twice
1077 // as expensive.
1078 return LT.first * 2 * OpCost;
1079 }
1080
1081 // An 'Expand' of URem and SRem is special because it may default
1082 // to expanding the operation into a sequence of sub-operations
1083 // i.e. X % Y -> X-(X/Y)*Y.
1084 if (ISD == ISD::UREM || ISD == ISD::SREM) {
1085 bool IsSigned = ISD == ISD::SREM;
1086 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
1087 LT.second) ||
1088 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
1089 LT.second)) {
1090 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1091 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
1092 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1093 InstructionCost MulCost =
1094 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1095 InstructionCost SubCost =
1096 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1097 return DivCost + MulCost + SubCost;
1098 }
1099 }
1100
1101 // We cannot scalarize scalable vectors, so return Invalid.
1104
1105 // Else, assume that we need to scalarize this op.
1106 // TODO: If one of the types get legalized by splitting, handle this
1107 // similarly to what getCastInstrCost() does.
1108 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1109 InstructionCost Cost = thisT()->getArithmeticInstrCost(
1110 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
1111 Args, CxtI);
1113 // Return the cost of multiple scalar invocations plus the cost of
1113 // inserting and extracting the values.
1114 SmallVector<Type *> Tys(Args.size(), Ty);
1115 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1116 VTy->getNumElements() * Cost;
1117 }
1118
1119 // We don't know anything about this scalar instruction.
1120 return OpCost;
1121 }
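  // Worked example for the URem/SRem expansion above: on a target where sdiv
  // is legal (or custom) but srem must be expanded, an srem is costed as
  // getArithmeticInstrCost(SDiv) + getArithmeticInstrCost(Mul) +
  // getArithmeticInstrCost(Sub), matching the X - (X / Y) * Y expansion.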
1122
1124 ArrayRef<int> Mask,
1125 VectorType *SrcTy, int &Index,
1126 VectorType *&SubTy) const {
1127 if (Mask.empty())
1128 return Kind;
1129 int NumDstElts = Mask.size();
1130 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1131 switch (Kind) {
1133 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
1134 return TTI::SK_Reverse;
1135 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
1136 return TTI::SK_Broadcast;
1137 if (isSplatMask(Mask, NumSrcElts, Index))
1138 return TTI::SK_Broadcast;
1139 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
1140 (Index + NumDstElts) <= NumSrcElts) {
1141 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
1143 }
1144 break;
1145 }
1146 case TTI::SK_PermuteTwoSrc: {
1147 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1149 Index, SubTy);
1150 int NumSubElts;
1151 if (NumDstElts > 2 && ShuffleVectorInst::isInsertSubvectorMask(
1152 Mask, NumSrcElts, NumSubElts, Index)) {
1153 if (Index + NumSubElts > NumSrcElts)
1154 return Kind;
1155 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
1157 }
1158 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
1159 return TTI::SK_Select;
1160 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1161 return TTI::SK_Transpose;
1162 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1163 return TTI::SK_Splice;
1164 break;
1165 }
1166 case TTI::SK_Select:
1167 case TTI::SK_Reverse:
1168 case TTI::SK_Broadcast:
1169 case TTI::SK_Transpose:
1172 case TTI::SK_Splice:
1173 break;
1174 }
1175 return Kind;
1176 }
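  // A few illustrative refinements, starting from TTI::SK_PermuteSingleSrc
  // (or TTI::SK_PermuteTwoSrc for the two-source mask) on a <4 x i32> source:
  //   <3,2,1,0>                -> TTI::SK_Reverse
  //   <1,1,1,1>                -> TTI::SK_Broadcast (Index = 1, via isSplatMask)
  //   <2,3> (two dst elements) -> TTI::SK_ExtractSubvector with Index = 2
  //   <0,5,2,7> (two sources)  -> TTI::SK_Select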
1177
1181 VectorType *SubTp, ArrayRef<const Value *> Args = {},
1182 const Instruction *CxtI = nullptr) const override {
1183 switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
1184 case TTI::SK_Broadcast:
1185 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1186 return getBroadcastShuffleOverhead(FVT, CostKind);
1188 case TTI::SK_Select:
1189 case TTI::SK_Splice:
1190 case TTI::SK_Reverse:
1191 case TTI::SK_Transpose:
1194 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1195 return getPermuteShuffleOverhead(FVT, CostKind);
1198 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1199 cast<FixedVectorType>(SubTp));
1201 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1202 cast<FixedVectorType>(SubTp));
1203 }
1204 llvm_unreachable("Unknown TTI::ShuffleKind");
1205 }
1206
1208 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1210 const Instruction *I = nullptr) const override {
1211 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1212 return 0;
1213
1214 const TargetLoweringBase *TLI = getTLI();
1215 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1216 assert(ISD && "Invalid opcode");
1217 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1218 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1219
1220 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1221 TypeSize DstSize = DstLT.second.getSizeInBits();
1222 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1223 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1224
1225 switch (Opcode) {
1226 default:
1227 break;
1228 case Instruction::Trunc:
1229 // Check for NOOP conversions.
1230 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1231 return 0;
1232 [[fallthrough]];
1233 case Instruction::BitCast:
1234 // Bitcasts between types that are legalized to the same type are free, and
1235 // an int-to/from-ptr cast of the same size is also assumed free.
1236 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1237 SrcSize == DstSize)
1238 return 0;
1239 break;
1240 case Instruction::FPExt:
1241 if (I && getTLI()->isExtFree(I))
1242 return 0;
1243 break;
1244 case Instruction::ZExt:
1245 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1246 return 0;
1247 [[fallthrough]];
1248 case Instruction::SExt:
1249 if (I && getTLI()->isExtFree(I))
1250 return 0;
1251
1252 // If this is a zext/sext of a load, return 0 if the corresponding
1253 // extending load exists on the target and the result type is legal.
1254 if (CCH == TTI::CastContextHint::Normal) {
1255 EVT ExtVT = EVT::getEVT(Dst);
1256 EVT LoadVT = EVT::getEVT(Src);
1257 unsigned LType =
1258 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1259 if (DstLT.first == SrcLT.first &&
1260 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1261 return 0;
1262 }
1263 break;
1264 case Instruction::AddrSpaceCast:
1265 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1266 Dst->getPointerAddressSpace()))
1267 return 0;
1268 break;
1269 }
1270
1271 auto *SrcVTy = dyn_cast<VectorType>(Src);
1272 auto *DstVTy = dyn_cast<VectorType>(Dst);
1273
1274 // If the cast is marked as legal (or promote) then assume low cost.
1275 if (SrcLT.first == DstLT.first &&
1276 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1277 return SrcLT.first;
1278
1279 // Handle scalar conversions.
1280 if (!SrcVTy && !DstVTy) {
1281 // Just check the op cost. If the operation is legal then assume it costs
1282 // 1.
1283 if (!TLI->isOperationExpand(ISD, DstLT.second))
1284 return 1;
1285
1286 // Assume that illegal scalar instructions are expensive.
1287 return 4;
1288 }
1289
1290 // Check vector-to-vector casts.
1291 if (DstVTy && SrcVTy) {
1292 // If the cast is between same-sized registers, then the check is simple.
1293 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1294
1295 // Assume that Zext is done using AND.
1296 if (Opcode == Instruction::ZExt)
1297 return SrcLT.first;
1298
1299 // Assume that sext is done using SHL and SRA.
1300 if (Opcode == Instruction::SExt)
1301 return SrcLT.first * 2;
1302
1303 // Just check the op cost. If the operation is legal then assume it
1304 // costs 1 and multiply by the type-legalization overhead.
1306 if (!TLI->isOperationExpand(ISD, DstLT.second))
1307 return SrcLT.first * 1;
1308 }
1309
1310 // If we are legalizing by splitting, query the concrete TTI for the cost
1311 // of casting the original vector twice. We also need to factor in the
1312 // cost of the split itself. Count that as 1, to be consistent with
1313 // getTypeLegalizationCost().
1314 bool SplitSrc =
1315 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1317 bool SplitDst =
1318 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1320 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
1321 DstVTy->getElementCount().isKnownEven()) {
1322 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1323 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1324 const T *TTI = thisT();
1325 // If both types need to be split then the split is free.
1326 InstructionCost SplitCost =
1327 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1328 return SplitCost +
1329 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1330 CostKind, I));
1331 }
1332
1333 // Scalarization cost is Invalid, can't assume any num elements.
1334 if (isa<ScalableVectorType>(DstVTy))
1336
1337 // In other cases where the source or destination are illegal, assume
1338 // the operation will get scalarized.
1339 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1340 InstructionCost Cost = thisT()->getCastInstrCost(
1341 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1342
1343 // Return the cost of multiple scalar invocations plus the cost of
1344 // inserting and extracting the values.
1345 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1346 CostKind) +
1347 Num * Cost;
1348 }
1349
1350 // We already handled vector-to-vector and scalar-to-scalar conversions.
1351 // This is where we handle bitcasts between vectors and scalars. We need to
1352 // assume that the conversion is scalarized in one way or another.
1354 if (Opcode == Instruction::BitCast) {
1355 // Illegal bitcasts are done by storing and loading from a stack slot.
1356 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1357 /*Extract*/ true, CostKind)
1358 : 0) +
1359 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1360 /*Extract*/ false, CostKind)
1361 : 0);
1362 }
1363
1364 llvm_unreachable("Unhandled cast");
1365 }
1366
1368 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1369 unsigned Index,
1370 TTI::TargetCostKind CostKind) const override {
1371 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1372 CostKind, Index, nullptr, nullptr) +
1373 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1375 }
1376
1379 const Instruction *I = nullptr) const override {
1380 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1381 }
1382
1384 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1388 const Instruction *I = nullptr) const override {
1389 const TargetLoweringBase *TLI = getTLI();
1390 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1391 assert(ISD && "Invalid opcode");
1392
1393 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)
1394 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1395 Op1Info, Op2Info, I);
1396
1397 // Selects on vectors are actually vector selects.
1398 if (ISD == ISD::SELECT) {
1399 assert(CondTy && "CondTy must exist");
1400 if (CondTy->isVectorTy())
1401 ISD = ISD::VSELECT;
1402 }
1403 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1404
1405 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1406 !TLI->isOperationExpand(ISD, LT.second)) {
1407 // The operation is legal. Assume it costs 1. Multiply
1408 // by the type-legalization overhead.
1409 return LT.first * 1;
1410 }
1411
1412 // Otherwise, assume that the cast is scalarized.
1413 // TODO: If one of the types get legalized by splitting, handle this
1414 // similarly to what getCastInstrCost() does.
1415 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1416 if (isa<ScalableVectorType>(ValTy))
1418
1419 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1420 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1421 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1422 CostKind, Op1Info, Op2Info, I);
1423
1424 // Return the cost of multiple scalar invocations plus the cost of
1425 // inserting and extracting the values.
1426 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1427 /*Extract*/ false, CostKind) +
1428 Num * Cost;
1429 }
1430
1431 // Unknown scalar opcode.
1432 return 1;
1433 }
1434
1437 unsigned Index, const Value *Op0,
1438 const Value *Op1) const override {
1439 return getRegUsageForType(Val->getScalarType());
1440 }
1441
1442 /// \param ScalarUserAndIdx encodes the information about extracts from a
1443 /// vector, with 'Scalar' being the value being extracted, 'User' being the
1444 /// user of the extract (nullptr if the user is not known before
1445 /// vectorization), and 'Idx' being the extract lane.
1448 unsigned Index, Value *Scalar,
1449 ArrayRef<std::tuple<Value *, User *, int>>
1450 ScalarUserAndIdx) const override {
1451 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
1452 nullptr);
1453 }
1454
1457 unsigned Index) const override {
1458 Value *Op0 = nullptr;
1459 Value *Op1 = nullptr;
1460 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1461 Op0 = IE->getOperand(0);
1462 Op1 = IE->getOperand(1);
1463 }
1464 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1465 Op1);
1466 }
1467
1471 unsigned Index) const override {
1472 unsigned NewIndex = -1;
1473 if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
1474 assert(Index < FVTy->getNumElements() &&
1475 "Unexpected index from end of vector");
1476 NewIndex = FVTy->getNumElements() - 1 - Index;
1477 }
1478 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1479 nullptr);
1480 }
1481
1483 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
1484 const APInt &DemandedDstElts,
1485 TTI::TargetCostKind CostKind) const override {
1486 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1487 "Unexpected size of DemandedDstElts.");
1488
1490
1491 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1492 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1493
1494 // The Mask shuffling cost is to extract all the elements of the Mask
1495 // and insert each of them Factor times into the wide vector:
1496 //
1497 // E.g. an interleaved group with factor 3:
1498 // %mask = icmp ult <8 x i32> %vec1, %vec2
1499 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1500 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1501 // The cost is estimated as extracting all mask elements from the <8xi1>
1502 // mask vector and inserting them factor times into the <24xi1> shuffled
1503 // mask vector.
1504 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1505 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1506 /*Insert*/ false,
1507 /*Extract*/ true, CostKind);
1508 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1509 /*Insert*/ true,
1510 /*Extract*/ false, CostKind);
1511
1512 return Cost;
1513 }
1514
1516 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1519 const Instruction *I = nullptr) const override {
1520 assert(!Src->isVoidTy() && "Invalid type");
1521 // Assume types, such as structs, are expensive.
1522 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1523 return 4;
1524 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1525
1526 // Assuming that all loads of legal types cost 1.
1527 InstructionCost Cost = LT.first;
1529 return Cost;
1530
1531 const DataLayout &DL = this->getDataLayout();
1532 if (Src->isVectorTy() &&
1533 // In practice it's not currently possible to have a change in lane
1534 // length for extending loads or truncating stores so both types should
1535 // have the same scalable property.
1536 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1537 LT.second.getSizeInBits())) {
1538 // This is a vector load that legalizes to a larger type than the vector
1539 // itself. Unless the corresponding extending load or truncating store is
1540 // legal, then this will scalarize.
1542 EVT MemVT = getTLI()->getValueType(DL, Src);
1543 if (Opcode == Instruction::Store)
1544 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1545 else
1546 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1547
1548 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1549 // This is a vector load/store for some illegal type that is scalarized.
1550 // We must account for the cost of building or decomposing the vector.
1552 cast<VectorType>(Src), Opcode != Instruction::Store,
1553 Opcode == Instruction::Store, CostKind);
1554 }
1555 }
1556
1557 return Cost;
1558 }
1559
1561 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1562 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1563 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1564
1565 // We cannot scalarize scalable vectors, so return Invalid.
1566 if (isa<ScalableVectorType>(VecTy))
1568
1569 auto *VT = cast<FixedVectorType>(VecTy);
1570
1571 unsigned NumElts = VT->getNumElements();
1572 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1573
1574 unsigned NumSubElts = NumElts / Factor;
1575 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1576
1577 // First, the cost of the load/store operation.
1579 if (UseMaskForCond || UseMaskForGaps) {
1580 unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
1581 : Intrinsic::masked_store;
1582 Cost = thisT()->getMemIntrinsicInstrCost(
1583 MemIntrinsicCostAttributes(IID, VecTy, Alignment, AddressSpace),
1584 CostKind);
1585 } else
1586 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1587 CostKind);
1588
1589 // Legalize the vector type, and get the legalized and unlegalized type
1590 // sizes.
1591 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1592 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1593 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1594
1595 // Scale the cost of the memory operation by the fraction of legalized
1596 // instructions that will actually be used. We shouldn't account for the
1597 // cost of dead instructions since they will be removed.
1598 //
1599 // E.g., An interleaved load of factor 8:
1600 // %vec = load <16 x i64>, <16 x i64>* %ptr
1601 // %v0 = shufflevector %vec, undef, <0, 8>
1602 //
1603 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1604 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1605 // type). The other loads are unused.
1606 //
1607 // TODO: Note that legalization can turn masked loads/stores into unmasked
1608 // (legalized) loads/stores. This can be reflected in the cost.
1609 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1610 // The number of loads of a legal type it will take to represent a load
1611 // of the unlegalized vector type.
1612 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1613
1614 // The number of elements of the unlegalized type that correspond to a
1615 // single legal instruction.
1616 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1617
1618 // Determine which legal instructions will be used.
1619 BitVector UsedInsts(NumLegalInsts, false);
1620 for (unsigned Index : Indices)
1621 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1622 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1623
1624 // Scale the cost of the load by the fraction of legal instructions that
1625 // will be used.
1626 Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
1627 }
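  // Continuing the factor-8 example above: <16 x i64> legalizes to 8 v2i64
  // loads, so NumLegalInsts = 8 and NumEltsPerLegalInst = 2. With only index
  // 0 requested, just the legal loads covering elements [0:1] and [8:9] are
  // used, so UsedInsts.count() = 2 and the memory cost becomes
  // divideCeil(2 * Cost, 8).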
1628
1629 // Then add the cost of the interleave operation.
1630 assert(Indices.size() <= Factor &&
1631 "Interleaved memory op has too many members");
1632
1633 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1634 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1635
1636 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1637 for (unsigned Index : Indices) {
1638 assert(Index < Factor && "Invalid index for interleaved memory op");
1639 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1640 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1641 }
1642
1643 if (Opcode == Instruction::Load) {
1644 // The interleave cost is similar to extracting the sub vectors' elements
1645 // from the wide vector and inserting them into the sub vectors.
1646 //
1647 // E.g. An interleaved load of factor 2 (with one member of index 0):
1648 // %vec = load <8 x i32>, <8 x i32>* %ptr
1649 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1650 // The cost is estimated as extracting elements at 0, 2, 4, 6 from the
1651 // <8 x i32> vector and inserting them into a <4 x i32> vector.
1652 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1653 SubVT, DemandedAllSubElts,
1654 /*Insert*/ true, /*Extract*/ false, CostKind);
1655 Cost += Indices.size() * InsSubCost;
1656 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1657 /*Insert*/ false,
1658 /*Extract*/ true, CostKind);
1659 } else {
1660 // The interleave cost is extracting elements from the sub vectors and
1661 // inserting them into the wide vector.
1662 //
1663 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1664 // (using VF=4):
1665 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1666 // %gaps.mask = <true, true, false, true, true, false,
1667 // true, true, false, true, true, false>
1668 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1669 // i32 Align, <12 x i1> %gaps.mask
1670 // The cost is estimated as extracting all elements (of actual members,
1671 // excluding gaps) from both <4 x i32> vectors and inserting them into the
1672 // <12 x i32> vector.
1673 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1674 SubVT, DemandedAllSubElts,
1675 /*Insert*/ false, /*Extract*/ true, CostKind);
1676 Cost += ExtSubCost * Indices.size();
1677 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1678 /*Insert*/ true,
1679 /*Extract*/ false, CostKind);
1680 }
1681
1682 if (!UseMaskForCond)
1683 return Cost;
1684
1685 Type *I8Type = Type::getInt8Ty(VT->getContext());
1686
1687 Cost += thisT()->getReplicationShuffleCost(
1688 I8Type, Factor, NumSubElts,
1689 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1690 CostKind);
1691
1692 // The Gaps mask is invariant and created outside the loop, therefore the
1693 // cost of creating it is not accounted for here. However if we have both
1694 // a MaskForGaps and some other mask that guards the execution of the
1695 // memory access, we need to account for the cost of And-ing the two masks
1696 // inside the loop.
1697 if (UseMaskForGaps) {
1698 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1699 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1700 CostKind);
1701 }
1702
1703 return Cost;
1704 }
1705
1706 /// Get intrinsic cost based on arguments.
1709 TTI::TargetCostKind CostKind) const override {
1710 // Check for generically free intrinsics.
1712 return 0;
1713
1714 // Assume that target intrinsics are cheap.
1715 Intrinsic::ID IID = ICA.getID();
1718
1719 // VP Intrinsics should have the same cost as their non-vp counterpart.
1720 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1721 // counterpart when the vector length argument is smaller than the maximum
1722 // vector length.
1723 // TODO: Support other kinds of VPIntrinsics
1724 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1725 std::optional<unsigned> FOp =
1726 VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
1727 if (FOp) {
1728 if (ICA.getID() == Intrinsic::vp_load) {
1729 Align Alignment;
1730 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1731 Alignment = VPI->getPointerAlignment().valueOrOne();
1732 unsigned AS = 0;
1733 if (ICA.getArgTypes().size() > 1)
1734 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
1735 AS = PtrTy->getAddressSpace();
1736 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1737 AS, CostKind);
1738 }
1739 if (ICA.getID() == Intrinsic::vp_store) {
1740 Align Alignment;
1741 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1742 Alignment = VPI->getPointerAlignment().valueOrOne();
1743 unsigned AS = 0;
1744 if (ICA.getArgTypes().size() >= 2)
1745 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
1746 AS = PtrTy->getAddressSpace();
1747 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1748 AS, CostKind);
1749 }
1751 ICA.getID() == Intrinsic::vp_fneg) {
1752 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1753 CostKind);
1754 }
1755 if (VPCastIntrinsic::isVPCast(ICA.getID())) {
1756 return thisT()->getCastInstrCost(
1757 *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
1759 }
1760 if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
1761 // We can only handle vp_cmp intrinsics with underlying instructions.
1762 if (ICA.getInst()) {
1763 assert(FOp);
1764 auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
1765 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1766 ICA.getReturnType(),
1767 UI->getPredicate(), CostKind);
1768 }
1769 }
1770 }
1771 if (ICA.getID() == Intrinsic::vp_load_ff) {
1772 Type *RetTy = ICA.getReturnType();
1773 Type *DataTy = cast<StructType>(RetTy)->getElementType(0);
1774 Align Alignment;
1775 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1776 Alignment = VPI->getPointerAlignment().valueOrOne();
1777 return thisT()->getMemIntrinsicInstrCost(
1778 MemIntrinsicCostAttributes(ICA.getID(), DataTy, Alignment),
1779 CostKind);
1780 }
1781 if (ICA.getID() == Intrinsic::vp_scatter) {
1782 if (ICA.isTypeBasedOnly()) {
1783 IntrinsicCostAttributes MaskedScatter(
1786 ICA.getFlags());
1787 return getTypeBasedIntrinsicInstrCost(MaskedScatter, CostKind);
1788 }
1789 Align Alignment;
1790 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1791 Alignment = VPI->getPointerAlignment().valueOrOne();
1792 bool VarMask = isa<Constant>(ICA.getArgs()[2]);
1793 return thisT()->getMemIntrinsicInstrCost(
1794 MemIntrinsicCostAttributes(Intrinsic::vp_scatter,
1795 ICA.getArgTypes()[0], ICA.getArgs()[1],
1796 VarMask, Alignment, nullptr),
1797 CostKind);
1798 }
1799 if (ICA.getID() == Intrinsic::vp_gather) {
1800 if (ICA.isTypeBasedOnly()) {
1801 IntrinsicCostAttributes MaskedGather(
1804 ICA.getFlags());
1805 return getTypeBasedIntrinsicInstrCost(MaskedGather, CostKind);
1806 }
1807 Align Alignment;
1808 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1809 Alignment = VPI->getPointerAlignment().valueOrOne();
1810 bool VarMask = isa<Constant>(ICA.getArgs()[1]);
1811 return thisT()->getMemIntrinsicInstrCost(
1812 MemIntrinsicCostAttributes(Intrinsic::vp_gather,
1813 ICA.getReturnType(), ICA.getArgs()[0],
1814 VarMask, Alignment, nullptr),
1815 CostKind);
1816 }
1817
1818 if (ICA.getID() == Intrinsic::vp_select ||
1819 ICA.getID() == Intrinsic::vp_merge) {
1820 TTI::OperandValueInfo OpInfoX, OpInfoY;
1821 if (!ICA.isTypeBasedOnly()) {
1822 OpInfoX = TTI::getOperandInfo(ICA.getArgs()[0]);
1823 OpInfoY = TTI::getOperandInfo(ICA.getArgs()[1]);
1824 }
1825 return getCmpSelInstrCost(
1826 Instruction::Select, ICA.getReturnType(), ICA.getArgTypes()[0],
1827 CmpInst::BAD_ICMP_PREDICATE, CostKind, OpInfoX, OpInfoY);
1828 }
1829
1830 std::optional<Intrinsic::ID> FID =
1831 VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
1832
1833 // Not functionally equivalent but close enough for cost modelling.
1834 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1835 FID = Intrinsic::vector_reverse;
1836
1837 if (FID) {
1838 // Non-vp version will have same arg types except mask and vector
1839 // length.
1840 assert(ICA.getArgTypes().size() >= 2 &&
1841 "Expected VPIntrinsic to have Mask and Vector Length args and "
1842 "types");
1843
1844 ArrayRef<const Value *> NewArgs = ArrayRef(ICA.getArgs());
1845 if (!ICA.isTypeBasedOnly())
1846 NewArgs = NewArgs.drop_back(2);
1848
1849 // VPReduction intrinsics have a start value argument that their non-vp
1850 // counterparts do not have, except for the fadd and fmul non-vp
1851 // counterpart.
1853 *FID != Intrinsic::vector_reduce_fadd &&
1854 *FID != Intrinsic::vector_reduce_fmul) {
1855 if (!ICA.isTypeBasedOnly())
1856 NewArgs = NewArgs.drop_front();
1857 NewTys = NewTys.drop_front();
1858 }
1859
1860 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewArgs,
1861 NewTys, ICA.getFlags());
1862 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1863 }
1864 }
1865
1866 if (ICA.isTypeBasedOnly())
1867 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
1868
1869 Type *RetTy = ICA.getReturnType();
1870
1871 ElementCount RetVF = isVectorizedTy(RetTy) ? getVectorizedTypeVF(RetTy)
1872 : ElementCount::getFixed(1);
1873
1874 const IntrinsicInst *I = ICA.getInst();
1875 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1876 FastMathFlags FMF = ICA.getFlags();
1877 switch (IID) {
1878 default:
1879 break;
1880
1881 case Intrinsic::powi:
1882 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1883 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1884 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1885 ShouldOptForSize)) {
1886 // The cost is modeled on the expansion performed by ExpandPowI in
1887 // SelectionDAGBuilder.
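// For example, powi(X, 13) has an exponent of 0b1101: 4 active bits and a
// popcount of 3, so the square-and-multiply expansion below is costed as
// (4 + 3 - 2) = 5 fmuls, and a negative exponent such as powi(X, -13) adds
// one fdiv for the final reciprocal.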
1888 APInt Exponent = RHSC->getValue().abs();
1889 unsigned ActiveBits = Exponent.getActiveBits();
1890 unsigned PopCount = Exponent.popcount();
1891 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1892 thisT()->getArithmeticInstrCost(
1893 Instruction::FMul, RetTy, CostKind);
1894 if (RHSC->isNegative())
1895 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1896 CostKind);
1897 return Cost;
1898 }
1899 }
1900 break;
1901 case Intrinsic::cttz:
1902 // FIXME: If necessary, this should go in target-specific overrides.
1903 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1904 return TargetTransformInfo::TCC_Basic;
1905 break;
1906
1907 case Intrinsic::ctlz:
1908 // FIXME: If necessary, this should go in target-specific overrides.
1909 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1910 return TargetTransformInfo::TCC_Basic;
1911 break;
1912
1913 case Intrinsic::memcpy:
1914 return thisT()->getMemcpyCost(ICA.getInst());
1915
1916 case Intrinsic::masked_scatter: {
1917 const Value *Mask = Args[2];
1918 bool VarMask = !isa<Constant>(Mask);
1919 Align Alignment = I->getParamAlign(1).valueOrOne();
1920 return thisT()->getMemIntrinsicInstrCost(
1921 MemIntrinsicCostAttributes(Intrinsic::masked_scatter,
1922 ICA.getArgTypes()[0], Args[1], VarMask,
1923 Alignment, I),
1924 CostKind);
1925 }
1926 case Intrinsic::masked_gather: {
1927 const Value *Mask = Args[1];
1928 bool VarMask = !isa<Constant>(Mask);
1929 Align Alignment = I->getParamAlign(0).valueOrOne();
1930 return thisT()->getMemIntrinsicInstrCost(
1931 MemIntrinsicCostAttributes(Intrinsic::masked_gather, RetTy, Args[0],
1932 VarMask, Alignment, I),
1933 CostKind);
1934 }
1935 case Intrinsic::masked_compressstore: {
1936 const Value *Data = Args[0];
1937 const Value *Mask = Args[2];
1938 Align Alignment = I->getParamAlign(1).valueOrOne();
1939 return thisT()->getMemIntrinsicInstrCost(
1940 MemIntrinsicCostAttributes(IID, Data->getType(), !isa<Constant>(Mask),
1941 Alignment, I),
1942 CostKind);
1943 }
1944 case Intrinsic::masked_expandload: {
1945 const Value *Mask = Args[1];
1946 Align Alignment = I->getParamAlign(0).valueOrOne();
1947 return thisT()->getMemIntrinsicInstrCost(
1948 MemIntrinsicCostAttributes(IID, RetTy, !isa<Constant>(Mask),
1949 Alignment, I),
1950 CostKind);
1951 }
1952 case Intrinsic::experimental_vp_strided_store: {
1953 const Value *Data = Args[0];
1954 const Value *Ptr = Args[1];
1955 const Value *Mask = Args[3];
1956 const Value *EVL = Args[4];
1957 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1958 Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
1959 Align Alignment =
1960 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1961 return thisT()->getMemIntrinsicInstrCost(
1962 MemIntrinsicCostAttributes(IID, Data->getType(), Ptr, VarMask,
1963 Alignment, I),
1964 CostKind);
1965 }
1966 case Intrinsic::experimental_vp_strided_load: {
1967 const Value *Ptr = Args[0];
1968 const Value *Mask = Args[2];
1969 const Value *EVL = Args[3];
1970 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1971 Type *EltTy = cast<VectorType>(RetTy)->getElementType();
1972 Align Alignment =
1973 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
1974 return thisT()->getMemIntrinsicInstrCost(
1975 MemIntrinsicCostAttributes(IID, RetTy, Ptr, VarMask, Alignment, I),
1976 CostKind);
1977 }
1978 case Intrinsic::stepvector: {
1979 if (isa<ScalableVectorType>(RetTy))
1981 // The cost of materialising a constant integer vector.
1983 }
1984 case Intrinsic::vector_extract: {
1985 // FIXME: Handle case where a scalable vector is extracted from a scalable
1986 // vector
1987 if (isa<ScalableVectorType>(RetTy))
1989 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1990 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
1991 cast<VectorType>(RetTy),
1992 cast<VectorType>(Args[0]->getType()), {},
1993 CostKind, Index, cast<VectorType>(RetTy));
1994 }
1995 case Intrinsic::vector_insert: {
1996 // FIXME: Handle case where a scalable vector is inserted into a scalable
1997 // vector
1998 if (isa<ScalableVectorType>(Args[1]->getType()))
2000 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2001 return thisT()->getShuffleCost(
2003 cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
2004 cast<VectorType>(Args[1]->getType()));
2005 }
2006 case Intrinsic::vector_splice: {
2007 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2008 return thisT()->getShuffleCost(TTI::SK_Splice, cast<VectorType>(RetTy),
2009 cast<VectorType>(Args[0]->getType()), {},
2010 CostKind, Index, cast<VectorType>(RetTy));
2011 }
2012 case Intrinsic::vector_reduce_add:
2013 case Intrinsic::vector_reduce_mul:
2014 case Intrinsic::vector_reduce_and:
2015 case Intrinsic::vector_reduce_or:
2016 case Intrinsic::vector_reduce_xor:
2017 case Intrinsic::vector_reduce_smax:
2018 case Intrinsic::vector_reduce_smin:
2019 case Intrinsic::vector_reduce_fmax:
2020 case Intrinsic::vector_reduce_fmin:
2021 case Intrinsic::vector_reduce_fmaximum:
2022 case Intrinsic::vector_reduce_fminimum:
2023 case Intrinsic::vector_reduce_umax:
2024 case Intrinsic::vector_reduce_umin: {
2025 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
2026 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2027 }
2028 case Intrinsic::vector_reduce_fadd:
2029 case Intrinsic::vector_reduce_fmul: {
2030 IntrinsicCostAttributes Attrs(
2031 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2032 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2033 }
2034 case Intrinsic::fshl:
2035 case Intrinsic::fshr: {
2036 const Value *X = Args[0];
2037 const Value *Y = Args[1];
2038 const Value *Z = Args[2];
2039 const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
2040 const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
2041 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
2042
2043 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2044 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
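// As a sketch of what the terms below correspond to, fshl on i8 would
// expand roughly as:
//   %s   = and i8 %z, 7         ; shift amount modulo the bit width
//   %inv = sub i8 8, %s
//   %hi  = shl i8 %x, %s
//   %lo  = lshr i8 %y, %inv
//   %res = or i8 %hi, %lo
// plus an icmp/select that returns %x when %s is zero (only needed when
// X != Y, i.e. when this is not a rotate).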
2046 Cost +=
2047 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2048 Cost +=
2049 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2050 Cost += thisT()->getArithmeticInstrCost(
2051 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2052 {OpInfoZ.Kind, TTI::OP_None});
2053 Cost += thisT()->getArithmeticInstrCost(
2054 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2055 {OpInfoZ.Kind, TTI::OP_None});
2056 // Non-constant shift amounts require a modulo. If the type size is a
2057 // power of 2 then this will be converted to an and, otherwise it will use
2058 // a urem.
2059 if (!OpInfoZ.isConstant())
2060 Cost += thisT()->getArithmeticInstrCost(
2061 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2062 : BinaryOperator::URem,
2063 RetTy, CostKind, OpInfoZ,
2064 {TTI::OK_UniformConstantValue, TTI::OP_None});
2065 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
2066 if (X != Y) {
2067 Type *CondTy = RetTy->getWithNewBitWidth(1);
2068 Cost +=
2069 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2071 Cost +=
2072 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2074 }
2075 return Cost;
2076 }
2077 case Intrinsic::experimental_cttz_elts: {
2078 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
2079
2080 // If we're not expanding the intrinsic then we assume this is cheap
2081 // to implement.
2082 if (!getTLI()->shouldExpandCttzElements(ArgType))
2083 return getTypeLegalizationCost(RetTy).first;
2084
2085 // TODO: The costs below reflect the expansion code in
2086 // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
2087 // favour of compile time.
2088
2089 // Find the smallest "sensible" element type to use for the expansion.
2090 bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
2091 ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
2092 if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
2093 VScaleRange = getVScaleRange(I->getCaller(), 64);
2094
2095 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2096 RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
2097 Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);
2098
2099 // Create the new vector type & get the vector length
2100 Type *NewVecTy = VectorType::get(
2101 NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
2102
2103 IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
2104 FMF);
2106 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2107
2108 Cost +=
2109 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2110 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2111 Args[0]->getType(),
2113 Cost +=
2114 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2115
2116 IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
2117 NewEltTy, NewVecTy, FMF, I, 1);
2118 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2119 Cost +=
2120 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2121
2122 return Cost;
2123 }
2124 case Intrinsic::get_active_lane_mask:
2125 case Intrinsic::experimental_vector_match:
2126 case Intrinsic::experimental_vector_histogram_add:
2127 case Intrinsic::experimental_vector_histogram_uadd_sat:
2128 case Intrinsic::experimental_vector_histogram_umax:
2129 case Intrinsic::experimental_vector_histogram_umin:
2130 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2131 case Intrinsic::modf:
2132 case Intrinsic::sincos:
2133 case Intrinsic::sincospi: {
2134 std::optional<unsigned> CallRetElementIndex;
2135 // The first element of the modf result is returned by value in the
2136 // libcall.
2137 if (ICA.getID() == Intrinsic::modf)
2138 CallRetElementIndex = 0;
2139
2140 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2141 ICA, CostKind, CallRetElementIndex))
2142 return *Cost;
2143 // Otherwise, fallback to default scalarization cost.
2144 break;
2145 }
2146 }
2147
2148 // Assume that we need to scalarize this intrinsic.
2149 // Compute the scalarization overhead based on Args for a vector
2150 // intrinsic.
2151 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2152 if (RetVF.isVector() && !RetVF.isScalable()) {
2153 ScalarizationCost = 0;
2154 if (!RetTy->isVoidTy()) {
2155 for (Type *VectorTy : getContainedTypes(RetTy)) {
2156 ScalarizationCost += getScalarizationOverhead(
2157 cast<VectorType>(VectorTy),
2158 /*Insert=*/true, /*Extract=*/false, CostKind);
2159 }
2160 }
2161 ScalarizationCost += getOperandsScalarizationOverhead(
2162 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2163 CostKind);
2164 }
2165
2166 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
2167 ScalarizationCost);
2168 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2169 }
2170
2171 /// Get intrinsic cost based on argument types.
2172 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
2173 /// cost of scalarizing the arguments and the return value will be computed
2174 /// based on types.
2175 InstructionCost
2176 getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2177 TTI::TargetCostKind CostKind) const {
2178 Intrinsic::ID IID = ICA.getID();
2179 Type *RetTy = ICA.getReturnType();
2180 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
2181 FastMathFlags FMF = ICA.getFlags();
2182 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
2183 bool SkipScalarizationCost = ICA.skipScalarizationCost();
2184
2185 VectorType *VecOpTy = nullptr;
2186 if (!Tys.empty()) {
2187 // The vector reduction operand is operand 0 except for fadd/fmul.
2188 // Their operand 0 is a scalar start value, so the vector op is operand 1.
2189 unsigned VecTyIndex = 0;
2190 if (IID == Intrinsic::vector_reduce_fadd ||
2191 IID == Intrinsic::vector_reduce_fmul)
2192 VecTyIndex = 1;
2193 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2194 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
2195 }
2196
2197 // Library call cost - other than size, make it expensive.
2198 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
2199 unsigned ISD = 0;
2200 switch (IID) {
2201 default: {
2202 // Scalable vectors cannot be scalarized, so return Invalid.
2203 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2204 return isa<ScalableVectorType>(Ty);
2205 }))
2206 return InstructionCost::getInvalid();
2207
2208 // Assume that we need to scalarize this intrinsic.
2209 InstructionCost ScalarizationCost =
2210 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2211 unsigned ScalarCalls = 1;
2212 Type *ScalarRetTy = RetTy;
2213 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2214 if (!SkipScalarizationCost)
2215 ScalarizationCost = getScalarizationOverhead(
2216 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
2217 ScalarCalls = std::max(ScalarCalls,
2219 ScalarRetTy = RetTy->getScalarType();
2220 }
2221 SmallVector<Type *, 4> ScalarTys;
2222 for (Type *Ty : Tys) {
2223 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2224 if (!SkipScalarizationCost)
2225 ScalarizationCost += getScalarizationOverhead(
2226 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2227 ScalarCalls = std::max(ScalarCalls,
2229 Ty = Ty->getScalarType();
2230 }
2231 ScalarTys.push_back(Ty);
2232 }
2233 if (ScalarCalls == 1)
2234 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
2235
2236 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
2237 InstructionCost ScalarCost =
2238 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2239
2240 return ScalarCalls * ScalarCost + ScalarizationCost;
2241 }
2242 // Look for intrinsics that can be lowered directly or turned into a scalar
2243 // intrinsic call.
2244 case Intrinsic::sqrt:
2245 ISD = ISD::FSQRT;
2246 break;
2247 case Intrinsic::sin:
2248 ISD = ISD::FSIN;
2249 break;
2250 case Intrinsic::cos:
2251 ISD = ISD::FCOS;
2252 break;
2253 case Intrinsic::sincos:
2254 ISD = ISD::FSINCOS;
2255 break;
2256 case Intrinsic::sincospi:
2257 ISD = ISD::FSINCOSPI;
2258 break;
2259 case Intrinsic::modf:
2260 ISD = ISD::FMODF;
2261 break;
2262 case Intrinsic::tan:
2263 ISD = ISD::FTAN;
2264 break;
2265 case Intrinsic::asin:
2266 ISD = ISD::FASIN;
2267 break;
2268 case Intrinsic::acos:
2269 ISD = ISD::FACOS;
2270 break;
2271 case Intrinsic::atan:
2272 ISD = ISD::FATAN;
2273 break;
2274 case Intrinsic::atan2:
2275 ISD = ISD::FATAN2;
2276 break;
2277 case Intrinsic::sinh:
2278 ISD = ISD::FSINH;
2279 break;
2280 case Intrinsic::cosh:
2281 ISD = ISD::FCOSH;
2282 break;
2283 case Intrinsic::tanh:
2284 ISD = ISD::FTANH;
2285 break;
2286 case Intrinsic::exp:
2287 ISD = ISD::FEXP;
2288 break;
2289 case Intrinsic::exp2:
2290 ISD = ISD::FEXP2;
2291 break;
2292 case Intrinsic::exp10:
2293 ISD = ISD::FEXP10;
2294 break;
2295 case Intrinsic::log:
2296 ISD = ISD::FLOG;
2297 break;
2298 case Intrinsic::log10:
2299 ISD = ISD::FLOG10;
2300 break;
2301 case Intrinsic::log2:
2302 ISD = ISD::FLOG2;
2303 break;
2304 case Intrinsic::ldexp:
2305 ISD = ISD::FLDEXP;
2306 break;
2307 case Intrinsic::fabs:
2308 ISD = ISD::FABS;
2309 break;
2310 case Intrinsic::canonicalize:
2311 ISD = ISD::FCANONICALIZE;
2312 break;
2313 case Intrinsic::minnum:
2314 ISD = ISD::FMINNUM;
2315 break;
2316 case Intrinsic::maxnum:
2317 ISD = ISD::FMAXNUM;
2318 break;
2319 case Intrinsic::minimum:
2320 ISD = ISD::FMINIMUM;
2321 break;
2322 case Intrinsic::maximum:
2323 ISD = ISD::FMAXIMUM;
2324 break;
2325 case Intrinsic::minimumnum:
2326 ISD = ISD::FMINIMUMNUM;
2327 break;
2328 case Intrinsic::maximumnum:
2329 ISD = ISD::FMAXIMUMNUM;
2330 break;
2331 case Intrinsic::copysign:
2332 ISD = ISD::FCOPYSIGN;
2333 break;
2334 case Intrinsic::floor:
2335 ISD = ISD::FFLOOR;
2336 break;
2337 case Intrinsic::ceil:
2338 ISD = ISD::FCEIL;
2339 break;
2340 case Intrinsic::trunc:
2341 ISD = ISD::FTRUNC;
2342 break;
2343 case Intrinsic::nearbyint:
2344 ISD = ISD::FNEARBYINT;
2345 break;
2346 case Intrinsic::rint:
2347 ISD = ISD::FRINT;
2348 break;
2349 case Intrinsic::lrint:
2350 ISD = ISD::LRINT;
2351 break;
2352 case Intrinsic::llrint:
2353 ISD = ISD::LLRINT;
2354 break;
2355 case Intrinsic::round:
2356 ISD = ISD::FROUND;
2357 break;
2358 case Intrinsic::roundeven:
2359 ISD = ISD::FROUNDEVEN;
2360 break;
2361 case Intrinsic::lround:
2362 ISD = ISD::LROUND;
2363 break;
2364 case Intrinsic::llround:
2365 ISD = ISD::LLROUND;
2366 break;
2367 case Intrinsic::pow:
2368 ISD = ISD::FPOW;
2369 break;
2370 case Intrinsic::fma:
2371 ISD = ISD::FMA;
2372 break;
2373 case Intrinsic::fmuladd:
2374 ISD = ISD::FMA;
2375 break;
2376 case Intrinsic::experimental_constrained_fmuladd:
2377 ISD = ISD::STRICT_FMA;
2378 break;
2379 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2380 case Intrinsic::lifetime_start:
2381 case Intrinsic::lifetime_end:
2382 case Intrinsic::sideeffect:
2383 case Intrinsic::pseudoprobe:
2384 case Intrinsic::arithmetic_fence:
2385 return 0;
2386 case Intrinsic::masked_store: {
2387 Type *Ty = Tys[0];
2388 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2389 return thisT()->getMemIntrinsicInstrCost(
2390 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2391 }
2392 case Intrinsic::masked_load: {
2393 Type *Ty = RetTy;
2394 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2395 return thisT()->getMemIntrinsicInstrCost(
2396 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2397 }
2398 case Intrinsic::experimental_vp_strided_store: {
2399 auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
2400 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2401 return thisT()->getMemIntrinsicInstrCost(
2402 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2403 /*VariableMask=*/true, Alignment,
2404 ICA.getInst()),
2405 CostKind);
2406 }
2407 case Intrinsic::experimental_vp_strided_load: {
2408 auto *Ty = cast<VectorType>(ICA.getReturnType());
2409 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2410 return thisT()->getMemIntrinsicInstrCost(
2411 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2412 /*VariableMask=*/true, Alignment,
2413 ICA.getInst()),
2414 CostKind);
2415 }
2416 case Intrinsic::vector_reduce_add:
2417 case Intrinsic::vector_reduce_mul:
2418 case Intrinsic::vector_reduce_and:
2419 case Intrinsic::vector_reduce_or:
2420 case Intrinsic::vector_reduce_xor:
2421 return thisT()->getArithmeticReductionCost(
2422 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2423 CostKind);
2424 case Intrinsic::vector_reduce_fadd:
2425 case Intrinsic::vector_reduce_fmul:
2426 return thisT()->getArithmeticReductionCost(
2427 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2428 case Intrinsic::vector_reduce_smax:
2429 case Intrinsic::vector_reduce_smin:
2430 case Intrinsic::vector_reduce_umax:
2431 case Intrinsic::vector_reduce_umin:
2432 case Intrinsic::vector_reduce_fmax:
2433 case Intrinsic::vector_reduce_fmin:
2434 case Intrinsic::vector_reduce_fmaximum:
2435 case Intrinsic::vector_reduce_fminimum:
2436 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2437 VecOpTy, ICA.getFlags(), CostKind);
2438 case Intrinsic::experimental_vector_match: {
2439 auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
2440 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
2441 unsigned SearchSize = NeedleTy->getNumElements();
2442
2443 // If we're not expanding the intrinsic then we assume this is cheap to
2444 // implement.
2445 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2446 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2447 return getTypeLegalizationCost(RetTy).first;
2448
2449 // Approximate the cost based on the expansion code in
2450 // SelectionDAGBuilder.
2452 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2453 CostKind, 1, nullptr, nullptr);
2454 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2455 CostKind, 0, nullptr, nullptr);
2456 Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, SearchTy, {},
2457 CostKind, 0, nullptr);
2458 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2460 Cost +=
2461 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2462 Cost *= SearchSize;
2463 Cost +=
2464 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2465 return Cost;
2466 }
2467 case Intrinsic::vector_reverse:
2468 return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
2469 cast<VectorType>(ICA.getArgTypes()[0]), {},
2470 CostKind, 0, cast<VectorType>(RetTy));
2471 case Intrinsic::experimental_vector_histogram_add:
2472 case Intrinsic::experimental_vector_histogram_uadd_sat:
2473 case Intrinsic::experimental_vector_histogram_umax:
2474 case Intrinsic::experimental_vector_histogram_umin: {
2476 Type *EltTy = ICA.getArgTypes()[1];
2477
2478 // Targets with scalable vectors must handle this on their own.
2479 if (!PtrsTy)
2481
2482 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2484 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2485 CostKind, 1, nullptr, nullptr);
2486 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2487 CostKind);
2488 switch (IID) {
2489 default:
2490 llvm_unreachable("Unhandled histogram update operation.");
2491 case Intrinsic::experimental_vector_histogram_add:
2492 Cost +=
2493 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2494 break;
2495 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2496 IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
2497 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2498 break;
2499 }
2500 case Intrinsic::experimental_vector_histogram_umax: {
2501 IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
2502 Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
2503 break;
2504 }
2505 case Intrinsic::experimental_vector_histogram_umin: {
2506 IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
2507 Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
2508 break;
2509 }
2510 }
2511 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2512 CostKind);
2513 Cost *= PtrsTy->getNumElements();
2514 return Cost;
2515 }
2516 case Intrinsic::get_active_lane_mask: {
2517 Type *ArgTy = ICA.getArgTypes()[0];
2518 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2519 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2520
2521 // If we're not expanding the intrinsic then we assume this is cheap
2522 // to implement.
2523 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2524 return getTypeLegalizationCost(RetTy).first;
2525
2526 // Create the expanded types that will be used to calculate the uadd_sat
2527 // operation.
2528 Type *ExpRetTy =
2529 VectorType::get(ArgTy, cast<VectorType>(RetTy)->getElementCount());
2530 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
2532 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2533 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2535 return Cost;
2536 }
2537 case Intrinsic::experimental_memset_pattern:
2538 // This cost is set to match the cost of the memset_pattern16 libcall.
2539 // It should likely be re-evaluated after migration to this intrinsic
2540 // is complete.
2541 return TTI::TCC_Basic * 4;
2542 case Intrinsic::abs:
2543 ISD = ISD::ABS;
2544 break;
2545 case Intrinsic::fshl:
2546 ISD = ISD::FSHL;
2547 break;
2548 case Intrinsic::fshr:
2549 ISD = ISD::FSHR;
2550 break;
2551 case Intrinsic::smax:
2552 ISD = ISD::SMAX;
2553 break;
2554 case Intrinsic::smin:
2555 ISD = ISD::SMIN;
2556 break;
2557 case Intrinsic::umax:
2558 ISD = ISD::UMAX;
2559 break;
2560 case Intrinsic::umin:
2561 ISD = ISD::UMIN;
2562 break;
2563 case Intrinsic::sadd_sat:
2564 ISD = ISD::SADDSAT;
2565 break;
2566 case Intrinsic::ssub_sat:
2567 ISD = ISD::SSUBSAT;
2568 break;
2569 case Intrinsic::uadd_sat:
2570 ISD = ISD::UADDSAT;
2571 break;
2572 case Intrinsic::usub_sat:
2573 ISD = ISD::USUBSAT;
2574 break;
2575 case Intrinsic::smul_fix:
2576 ISD = ISD::SMULFIX;
2577 break;
2578 case Intrinsic::umul_fix:
2579 ISD = ISD::UMULFIX;
2580 break;
2581 case Intrinsic::sadd_with_overflow:
2582 ISD = ISD::SADDO;
2583 break;
2584 case Intrinsic::ssub_with_overflow:
2585 ISD = ISD::SSUBO;
2586 break;
2587 case Intrinsic::uadd_with_overflow:
2588 ISD = ISD::UADDO;
2589 break;
2590 case Intrinsic::usub_with_overflow:
2591 ISD = ISD::USUBO;
2592 break;
2593 case Intrinsic::smul_with_overflow:
2594 ISD = ISD::SMULO;
2595 break;
2596 case Intrinsic::umul_with_overflow:
2597 ISD = ISD::UMULO;
2598 break;
2599 case Intrinsic::fptosi_sat:
2600 case Intrinsic::fptoui_sat: {
2601 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
2602 std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
2603
2604 // For cast instructions, types differ between source and destination.
2605 // We also need to check whether the source type can be legalized.
2606 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2607 return InstructionCost::getInvalid();
2608 ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
2609 : ISD::FP_TO_UINT_SAT;
2610 break;
2611 }
2612 case Intrinsic::ctpop:
2613 ISD = ISD::CTPOP;
2614 // In case of legalization use TCC_Expensive. This is cheaper than a
2615 // library call but still not a cheap instruction.
2616 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2617 break;
2618 case Intrinsic::ctlz:
2619 ISD = ISD::CTLZ;
2620 break;
2621 case Intrinsic::cttz:
2622 ISD = ISD::CTTZ;
2623 break;
2624 case Intrinsic::bswap:
2625 ISD = ISD::BSWAP;
2626 break;
2627 case Intrinsic::bitreverse:
2628 ISD = ISD::BITREVERSE;
2629 break;
2630 case Intrinsic::ucmp:
2631 ISD = ISD::UCMP;
2632 break;
2633 case Intrinsic::scmp:
2634 ISD = ISD::SCMP;
2635 break;
2636 }
2637
2638 auto *ST = dyn_cast<StructType>(RetTy);
2639 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2640 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);
2641
2642 const TargetLoweringBase *TLI = getTLI();
2643
2644 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2645 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2646 TLI->isFAbsFree(LT.second)) {
2647 return 0;
2648 }
2649
2650 // The operation is legal. Assume it costs 1.
2651 // If the type is split to multiple registers, assume that there is some
2652 // overhead to this.
2653 // TODO: Once we have extract/insert subvector cost we need to use them.
2654 if (LT.first > 1)
2655 return (LT.first * 2);
2656 else
2657 return (LT.first * 1);
2658 } else if (TLI->isOperationCustom(ISD, LT.second)) {
2659 // If the operation is custom lowered then assume
2660 // that the code is twice as expensive.
2661 return (LT.first * 2);
2662 }
2663
2664 switch (IID) {
2665 case Intrinsic::fmuladd: {
2666 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2667 // point mul followed by an add.
2668
2669 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2670 CostKind) +
2671 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2672 CostKind);
2673 }
2674 case Intrinsic::experimental_constrained_fmuladd: {
2675 IntrinsicCostAttributes FMulAttrs(
2676 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2677 IntrinsicCostAttributes FAddAttrs(
2678 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2679 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2680 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2681 }
2682 case Intrinsic::smin:
2683 case Intrinsic::smax:
2684 case Intrinsic::umin:
2685 case Intrinsic::umax: {
2686 // minmax(X,Y) = select(icmp(X,Y),X,Y)
2687 Type *CondTy = RetTy->getWithNewBitWidth(1);
2688 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2689 CmpInst::Predicate Pred =
2690 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2692 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2693 Pred, CostKind);
2694 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2695 Pred, CostKind);
2696 return Cost;
2697 }
2698 case Intrinsic::sadd_with_overflow:
2699 case Intrinsic::ssub_with_overflow: {
2700 Type *SumTy = RetTy->getContainedType(0);
2701 Type *OverflowTy = RetTy->getContainedType(1);
2702 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2703 ? BinaryOperator::Add
2704 : BinaryOperator::Sub;
2705
2706 // Add:
2707 // Overflow -> (Result < LHS) ^ (RHS < 0)
2708 // Sub:
2709 // Overflow -> (Result < LHS) ^ (RHS > 0)
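// E.g. for i8: sadd.with.overflow(100, 100) wraps to a result of -56, so
// (Result < LHS) is true while (RHS < 0) is false, and their xor correctly
// signals overflow.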
2711 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2712 Cost +=
2713 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2715 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2716 CostKind);
2717 return Cost;
2718 }
2719 case Intrinsic::uadd_with_overflow:
2720 case Intrinsic::usub_with_overflow: {
2721 Type *SumTy = RetTy->getContainedType(0);
2722 Type *OverflowTy = RetTy->getContainedType(1);
2723 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2724 ? BinaryOperator::Add
2725 : BinaryOperator::Sub;
2726 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2729
2731 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2732 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2733 OverflowTy, Pred, CostKind);
2734 return Cost;
2735 }
2736 case Intrinsic::smul_with_overflow:
2737 case Intrinsic::umul_with_overflow: {
2738 Type *MulTy = RetTy->getContainedType(0);
2739 Type *OverflowTy = RetTy->getContainedType(1);
2740 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2741 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2742 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2743
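// The terms below model the usual widening expansion: extend both operands
// to 2*BW, multiply, and truncate back. Overflow is detected by comparing
// the high half of the product against 0 (unsigned) or against the sign
// bits of the truncated low half (signed).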
2744 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2746
2748 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2749 Cost +=
2750 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2751 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2752 CCH, CostKind);
2753 Cost += thisT()->getArithmeticInstrCost(
2754 Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2756
2757 if (IsSigned)
2758 Cost += thisT()->getArithmeticInstrCost(
2759 Instruction::AShr, MulTy, CostKind,
2762
2763 Cost += thisT()->getCmpSelInstrCost(
2764 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2765 return Cost;
2766 }
2767 case Intrinsic::sadd_sat:
2768 case Intrinsic::ssub_sat: {
2769 // Assume a default expansion.
2770 Type *CondTy = RetTy->getWithNewBitWidth(1);
2771
2772 Type *OpTy = StructType::create({RetTy, CondTy});
2773 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2774 ? Intrinsic::sadd_with_overflow
2775 : Intrinsic::ssub_with_overflow;
2777
2778 // SatMax -> Overflow && SumDiff < 0
2779 // SatMin -> Overflow && SumDiff >= 0
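// E.g. sadd.sat.i8(100, 100) overflows with a wrapped SumDiff of -56 < 0,
// so the result saturates to SatMax (127).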
2781 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2782 nullptr, ScalarizationCostPassed);
2783 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2784 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2785 Pred, CostKind);
2786 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2787 CondTy, Pred, CostKind);
2788 return Cost;
2789 }
2790 case Intrinsic::uadd_sat:
2791 case Intrinsic::usub_sat: {
2792 Type *CondTy = RetTy->getWithNewBitWidth(1);
2793
2794 Type *OpTy = StructType::create({RetTy, CondTy});
2795 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2796 ? Intrinsic::uadd_with_overflow
2797 : Intrinsic::usub_with_overflow;
2798
2800 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2801 nullptr, ScalarizationCostPassed);
2802 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2803 Cost +=
2804 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2806 return Cost;
2807 }
2808 case Intrinsic::smul_fix:
2809 case Intrinsic::umul_fix: {
2810 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2811 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2812
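// The terms below model the generic fixed-point expansion: extend both
// operands to 2*BW, multiply, then rebuild the scaled result from the
// truncated low and high halves as (lo >> scale) | (hi << (BW - scale)).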
2813 unsigned ExtOp =
2814 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2816
2818 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2819 Cost +=
2820 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2821 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2822 CCH, CostKind);
2823 Cost += thisT()->getArithmeticInstrCost(
2824 Instruction::LShr, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2826 Cost += thisT()->getArithmeticInstrCost(
2827 Instruction::Shl, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2829 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2830 return Cost;
2831 }
2832 case Intrinsic::abs: {
2833 // abs(X) = select(icmp(X,0),X,sub(0,X))
2834 Type *CondTy = RetTy->getWithNewBitWidth(1);
2837 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2838 Pred, CostKind);
2839 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2840 Pred, CostKind);
2841 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2842 Cost += thisT()->getArithmeticInstrCost(
2843 BinaryOperator::Sub, RetTy, CostKind,
2845 return Cost;
2846 }
2847 case Intrinsic::fshl:
2848 case Intrinsic::fshr: {
2849 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2850 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2851 Type *CondTy = RetTy->getWithNewBitWidth(1);
2853 Cost +=
2854 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2855 Cost +=
2856 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2857 Cost +=
2858 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2859 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2860 CostKind);
2861 // Non-constant shift amounts require a modulo. If the type size is a
2862 // power of 2 then this will be converted to an and, otherwise it will use
2863 // a urem.
2864 Cost += thisT()->getArithmeticInstrCost(
2865 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2866 : BinaryOperator::URem,
2867 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2868 {TTI::OK_UniformConstantValue, TTI::OP_None});
2869 // Shift-by-zero handling.
2870 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2872 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2874 return Cost;
2875 }
2876 case Intrinsic::fptosi_sat:
2877 case Intrinsic::fptoui_sat: {
2878 if (Tys.empty())
2879 break;
2880 Type *FromTy = Tys[0];
2881 bool IsSigned = IID == Intrinsic::fptosi_sat;
2882
2884 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2885 {FromTy, FromTy});
2886 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2887 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2888 {FromTy, FromTy});
2889 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2890 Cost += thisT()->getCastInstrCost(
2891 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2893 if (IsSigned) {
2894 Type *CondTy = RetTy->getWithNewBitWidth(1);
2895 Cost += thisT()->getCmpSelInstrCost(
2896 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2897 Cost += thisT()->getCmpSelInstrCost(
2898 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2899 }
2900 return Cost;
2901 }
2902 case Intrinsic::ucmp:
2903 case Intrinsic::scmp: {
2904 Type *CmpTy = Tys[0];
2905 Type *CondTy = RetTy->getWithNewBitWidth(1);
2907 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2909 CostKind) +
2910 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2912 CostKind);
2913
2914 EVT VT = TLI->getValueType(DL, CmpTy, true);
2916 // x < y ? -1 : (x > y ? 1 : 0)
2917 Cost += 2 * thisT()->getCmpSelInstrCost(
2918 BinaryOperator::Select, RetTy, CondTy,
2920 } else {
2921 // zext(x > y) - zext(x < y)
2922 Cost +=
2923 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2925 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2926 CostKind);
2927 }
2928 return Cost;
2929 }
2930 case Intrinsic::maximumnum:
2931 case Intrinsic::minimumnum: {
2932 // On platforms that support FMAXNUM_IEEE/FMINNUM_IEEE, we expand
2933 // maximumnum/minimumnum to
2934 // ARG0 = fcanonicalize ARG0, ARG0 // to quiet ARG0
2935 // ARG1 = fcanonicalize ARG1, ARG1 // to quiet ARG1
2936 // RESULT = MAXNUM_IEEE ARG0, ARG1 // or MINNUM_IEEE
2937 // FIXME: In LangRef, we claim FMAXNUM has the same behaviour as
2938 // FMAXNUM_IEEE, but the backend hasn't migrated the code yet.
2939 // Eventually, we will remove FMAXNUM_IEEE and FMINNUM_IEEE.
2940 int IeeeISD =
2941 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
2942 if (TLI->isOperationLegal(IeeeISD, LT.second)) {
2943 IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
2944 RetTy, Tys[0]);
2945 InstructionCost FCanonicalizeCost =
2946 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
2947 return LT.first + FCanonicalizeCost * 2;
2948 }
2949 break;
2950 }
2951 default:
2952 break;
2953 }
2954
2955 // Else, assume that we need to scalarize this intrinsic. For math builtins
2956 // this will emit a costly libcall, adding call overhead and spills. Make it
2957 // very expensive.
2958 if (isVectorizedTy(RetTy)) {
2959 ArrayRef<Type *> RetVTys = getContainedTypes(RetTy);
2960
2961 // Scalable vectors cannot be scalarized, so return Invalid.
2962 if (any_of(concat<Type *const>(RetVTys, Tys),
2963 [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
2964 return InstructionCost::getInvalid();
2965
2966 InstructionCost ScalarizationCost = ScalarizationCostPassed;
2967 if (!SkipScalarizationCost) {
2968 ScalarizationCost = 0;
2969 for (Type *RetVTy : RetVTys) {
2970 ScalarizationCost += getScalarizationOverhead(
2971 cast<VectorType>(RetVTy), /*Insert=*/true,
2972 /*Extract=*/false, CostKind);
2973 }
2974 }
2975
2976 unsigned ScalarCalls = getVectorizedTypeVF(RetTy).getFixedValue();
2977 SmallVector<Type *, 4> ScalarTys;
2978 for (Type *Ty : Tys) {
2979 if (Ty->isVectorTy())
2980 Ty = Ty->getScalarType();
2981 ScalarTys.push_back(Ty);
2982 }
2983 IntrinsicCostAttributes Attrs(IID, toScalarizedTy(RetTy), ScalarTys, FMF);
2984 InstructionCost ScalarCost =
2985 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2986 for (Type *Ty : Tys) {
2987 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2988 if (!ICA.skipScalarizationCost())
2989 ScalarizationCost += getScalarizationOverhead(
2990 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2991 ScalarCalls = std::max(ScalarCalls,
2993 }
2994 }
2995 return ScalarCalls * ScalarCost + ScalarizationCost;
2996 }
2997
2998 // This is going to be turned into a library call, make it expensive.
2999 return SingleCallCost;
3000 }
3001
3002 /// Get memory intrinsic cost based on arguments.
3003 InstructionCost
3004 getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
3005 TTI::TargetCostKind CostKind) const override {
3006 unsigned Id = MICA.getID();
3007 Type *DataTy = MICA.getDataType();
3008 bool VariableMask = MICA.getVariableMask();
3009 Align Alignment = MICA.getAlignment();
3010
3011 switch (Id) {
3012 case Intrinsic::experimental_vp_strided_load:
3013 case Intrinsic::experimental_vp_strided_store: {
3014 unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
3015 ? Instruction::Load
3016 : Instruction::Store;
3017 // For a target without strided memory operations (or for an illegal
3018 // operation type on one which does), assume we lower to a gather/scatter
3019 // operation. (Which may in turn be scalarized.)
3020 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3021 VariableMask, true, CostKind);
3022 }
3023 case Intrinsic::masked_scatter:
3024 case Intrinsic::masked_gather:
3025 case Intrinsic::vp_scatter:
3026 case Intrinsic::vp_gather: {
3027 unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
3028 MICA.getID() == Intrinsic::vp_gather)
3029 ? Instruction::Load
3030 : Instruction::Store;
3031
3032 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3033 VariableMask, true, CostKind);
3034 }
3035 case Intrinsic::vp_load:
3036 case Intrinsic::vp_store:
3038 case Intrinsic::masked_load:
3039 case Intrinsic::masked_store: {
3040 unsigned Opcode =
3041 Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
3042 // TODO: Pass on AddressSpace when we have test coverage.
3043 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
3044 CostKind);
3045 }
3046 case Intrinsic::masked_compressstore:
3047 case Intrinsic::masked_expandload: {
3048 unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
3049 ? Instruction::Load
3050 : Instruction::Store;
3051 // Treat expand load/compress store as gather/scatter operation.
3052 // TODO: implement more precise cost estimation for these intrinsics.
3053 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3054 VariableMask,
3055 /*IsGatherScatter*/ true, CostKind);
3056 }
3057 case Intrinsic::vp_load_ff:
3059 default:
3060 llvm_unreachable("unexpected intrinsic");
3061 }
3062 }
3063
3064 /// Compute a cost of the given call instruction.
3065 ///
3066 /// Compute the cost of calling function F with return type RetTy and
3067 /// argument types Tys. F might be nullptr, in this case the cost of an
3068 /// arbitrary call with the specified signature will be returned.
3069 /// This is used, for instance, when we estimate call of a vector
3070 /// counterpart of the given function.
3071 /// \param F Called function, might be nullptr.
3072 /// \param RetTy Return value types.
3073 /// \param Tys Argument types.
3074 /// \returns The cost of Call instruction.
3077 TTI::TargetCostKind CostKind) const override {
3078 return 10;
3079 }
3080
3081 unsigned getNumberOfParts(Type *Tp) const override {
3082 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
3083 if (!LT.first.isValid())
3084 return 0;
3085 // Try to find actual number of parts for non-power-of-2 elements as
3086 // ceil(num-of-elements/num-of-subtype-elements).
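// E.g. a <6 x i32> that legalizes to <4 x i32> subvectors is reported as
// ceil(6 / 4) = 2 parts.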
3087 if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
3088 FTp && LT.second.isFixedLengthVector() &&
3089 !has_single_bit(FTp->getNumElements())) {
3090 if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
3091 EVT(LT.second).getTypeForEVT(Tp->getContext()));
3092 SubTp && SubTp->getElementType() == FTp->getElementType())
3093 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3094 }
3095 return LT.first.getValue();
3096 }
3097
3100 TTI::TargetCostKind) const override {
3101 return 0;
3102 }
3103
3104 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
3105 /// We assume that the reduction operation is performed in the following way:
3106 ///
3107 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
3108 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
3109 /// \----------------v-------------/ \----------v------------/
3110 /// n/2 elements n/2 elements
3111 /// %red1 = op <n x t> %val, <n x t> val1
3112 /// After this operation we have a vector %red1 where only the first n/2
3113 /// elements are meaningful; the remaining n/2 elements are undefined and can
3114 /// be dropped. All subsequent operations actually work on a vector of
3115 /// length n/2, not n, though the real vector length is still n.
3116 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
3117 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
3118 /// \----------------v-------------/ \----------v------------/
3119 /// n/4 elements 3*n/4 elements
3120 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
3121 /// length n/2, the resulting vector has length n/4 etc.
3122 ///
3123 /// The cost model should take into account that the actual length of the
3124 /// vector is reduced on each iteration.
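/// E.g. for an <8 x float> fadd reduction that fits in a single legal
/// register, this models Log2(8) = 3 levels: three shuffles and three fadds,
/// followed by a single extractelement of lane 0.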
3125 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
3126 TTI::TargetCostKind CostKind) const {
3127 // Targets must implement a default value for the scalable case, since
3128 // we don't know how many lanes the vector has.
3129 if (isa<ScalableVectorType>(Ty))
3130 return InstructionCost::getInvalid();
3131
3132 Type *ScalarTy = Ty->getElementType();
3133 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3134 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3135 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
3136 NumVecElts >= 2) {
3137 // Or reduction for i1 is represented as:
3138 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3139 // %res = cmp ne iReduxWidth %val, 0
3140 // And reduction for i1 is represented as:
3141 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3142 // %res = cmp eq iReduxWidth %val, -1 (all ones)
3143 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
3144 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3146 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3149 }
3150 unsigned NumReduxLevels = Log2_32(NumVecElts);
3151 InstructionCost ArithCost = 0;
3152 InstructionCost ShuffleCost = 0;
3153 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3154 unsigned LongVectorCount = 0;
3155 unsigned MVTLen =
3156 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3157 while (NumVecElts > MVTLen) {
3158 NumVecElts /= 2;
3159 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3160 ShuffleCost += thisT()->getShuffleCost(
3161 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3162 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3163 Ty = SubTy;
3164 ++LongVectorCount;
3165 }
3166
3167 NumReduxLevels -= LongVectorCount;
3168
3169 // The minimal length of the vector is limited by the real length of vector
3170 // operations performed on the current platform. That's why several final
3171 // reduction operations are performed on the vectors with the same
3172 // architecture-dependent length.
3173
3174 // By default reductions need one shuffle per reduction level.
3175 ShuffleCost +=
3176 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3177 Ty, {}, CostKind, 0, Ty);
3178 ArithCost +=
3179 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3180 return ShuffleCost + ArithCost +
3181 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3182 CostKind, 0, nullptr, nullptr);
3183 }
3184
3185 /// Try to calculate the cost of performing strict (in-order) reductions,
3186 /// which involves doing a sequence of floating point additions in lane
3187 /// order, starting with an initial value. For example, consider a scalar
3188 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
3189 ///
3190 /// Vector = <float %v0, float %v1, float %v2, float %v3>
3191 ///
3192 /// %add1 = %InitVal + %v0
3193 /// %add2 = %add1 + %v1
3194 /// %add3 = %add2 + %v2
3195 /// %add4 = %add3 + %v3
3196 ///
3197 /// As a simple estimate we can say the cost of such a reduction is 4 times
3198 /// the cost of a scalar FP addition. We can only estimate the costs for
3199 /// fixed-width vectors here because for scalable vectors we do not know the
3200 /// runtime number of operations.
3201 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
3202 TTI::TargetCostKind CostKind) const {
3203 // Targets must implement a default value for the scalable case, since
3204 // we don't know how many lanes the vector has.
3205 if (isa<ScalableVectorType>(Ty))
3206 return InstructionCost::getInvalid();
3207
3208 auto *VTy = cast<FixedVectorType>(Ty);
3210 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
3211 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
3212 Opcode, VTy->getElementType(), CostKind);
3213 ArithCost *= VTy->getNumElements();
3214
3215 return ExtractCost + ArithCost;
3216 }
3217
3218 InstructionCost
3219 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
3220 std::optional<FastMathFlags> FMF,
3221 TTI::TargetCostKind CostKind) const override {
3222 assert(Ty && "Unknown reduction vector type");
3223 if (TTI::requiresOrderedReduction(FMF))
3224 return getOrderedReductionCost(Opcode, Ty, CostKind);
3225 return getTreeReductionCost(Opcode, Ty, CostKind);
3226 }
3227
3228 /// Try to calculate op costs for min/max reduction operations.
3229 /// \param CondTy Conditional type for the Select instruction.
3230 InstructionCost
3231 getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
3232 TTI::TargetCostKind CostKind) const override {
3233 // Targets must implement a default value for the scalable case, since
3234 // we don't know how many lanes the vector has.
3235 if (isa<ScalableVectorType>(Ty))
3236 return InstructionCost::getInvalid();
3237
3238 Type *ScalarTy = Ty->getElementType();
3239 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3240 unsigned NumReduxLevels = Log2_32(NumVecElts);
3241 InstructionCost MinMaxCost = 0;
3242 InstructionCost ShuffleCost = 0;
3243 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3244 unsigned LongVectorCount = 0;
3245 unsigned MVTLen =
3246 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3247 while (NumVecElts > MVTLen) {
3248 NumVecElts /= 2;
3249 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3250
3251 ShuffleCost += thisT()->getShuffleCost(
3252 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3253
3254 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
3255 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
3256 Ty = SubTy;
3257 ++LongVectorCount;
3258 }
3259
3260 NumReduxLevels -= LongVectorCount;
3261
3262 // The minimal length of the vector is limited by the real length of vector
3263 // operations performed on the current platform. That's why several final
3264 // reduction operations are performed on the vectors with the same
3265 // architecture-dependent length.
3266 ShuffleCost +=
3267 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3268 Ty, {}, CostKind, 0, Ty);
3269 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
3270 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
3271 // The last min/max should be in vector registers and we counted it above.
3272 // So we just need a single extractelement.
3273 return ShuffleCost + MinMaxCost +
3274 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3275 CostKind, 0, nullptr, nullptr);
3276 }
3277
3278 InstructionCost
3279 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
3280 VectorType *Ty, std::optional<FastMathFlags> FMF,
3281 TTI::TargetCostKind CostKind) const override {
3282 if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
3283 FTy && IsUnsigned && Opcode == Instruction::Add &&
3284 FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
3285 // Represent vector_reduce_add(ZExt(<n x i1>)) as
3286 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
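// E.g. vector_reduce_add(zext <8 x i1> %m to <8 x i32>) is modeled as:
//   %bits = bitcast <8 x i1> %m to i8
//   %cnt  = call i8 @llvm.ctpop.i8(i8 %bits)
//   %res  = zext i8 %cnt to i32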
3287 auto *IntTy =
3288 IntegerType::get(ResTy->getContext(), FTy->getNumElements());
3289 IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy},
3290 FMF ? *FMF : FastMathFlags());
3291 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3293 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3294 }
3295 // Without any native support, this is equivalent to the cost of
3296 // vecreduce.opcode(ext(Ty A)).
3297 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3298 InstructionCost RedCost =
3299 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3300 InstructionCost ExtCost = thisT()->getCastInstrCost(
3301 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3303
3304 return RedCost + ExtCost;
3305 }
3306
3307 InstructionCost
3308 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
3309 VectorType *Ty,
3310 TTI::TargetCostKind CostKind) const override {
3311 // Without any native support, this is equivalent to the cost of
3312 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
3313 // vecreduce.add(mul(A, B)).
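// E.g. a dot-product style pattern such as
//   vecreduce.add(mul(zext <16 x i8> %a to <16 x i32>,
//                     zext <16 x i8> %b to <16 x i32>))
// is costed below as one Add/Sub reduction over the extended type, one mul,
// and two extends.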
3314 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3315 "The reduction opcode is expected to be Add or Sub.");
3316 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3317 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
3318 RedOpcode, ExtTy, std::nullopt, CostKind);
3319 InstructionCost ExtCost = thisT()->getCastInstrCost(
3320 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3321 TTI::CastContextHint::None, CostKind);
3322
3323 InstructionCost MulCost =
3324 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3325
3326 return RedCost + MulCost + 2 * ExtCost;
3327 }
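// For illustration: a dot-product style pattern such as
// vecreduce.add(mul(zext(<8 x i8> A), zext(<8 x i8> B))) accumulated into
// i32 is costed above as one add reduction on <8 x i32>, one mul on
// <8 x i32>, and two zexts from <8 x i8> to <8 x i32>.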
3328
3329 InstructionCost getVectorSplitCost() const { return 1; }
3330
3331 /// @}
3332};
3333
3334/// Concrete BasicTTIImpl that can be used if no further customization
3335/// is needed.
3336class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
3337 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
3338
3339 friend class BasicTTIImplBase<BasicTTIImpl>;
3340
3341 const TargetSubtargetInfo *ST;
3342 const TargetLoweringBase *TLI;
3343
3344 const TargetSubtargetInfo *getST() const { return ST; }
3345 const TargetLoweringBase *getTLI() const { return TLI; }
3346
3347public:
3348 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
3349};
3350
3351} // end namespace llvm
3352
3353#endif // LLVM_CODEGEN_BASICTTIIMPL_H
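The hooks implemented above are usually reached through the TargetTransformInfo wrapper rather than by calling BasicTTIImplBase directly. As a rough, non-authoritative sketch (assuming a TargetTransformInfo obtained from TargetIRAnalysis; the helper name below is purely illustrative), per-target reduction costs can be queried like this:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>

using namespace llvm;

// Illustrative helper (not part of this header): print the
// reciprocal-throughput cost of a umin reduction and of a plain add
// reduction over <8 x i32>. TTI is assumed to come from TargetIRAnalysis
// for the function being analyzed.
static void printReductionCosts(LLVMContext &Ctx,
                                const TargetTransformInfo &TTI) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 8);
  InstructionCost MinCost = TTI.getMinMaxReductionCost(
      Intrinsic::umin, VecTy, FastMathFlags(),
      TargetTransformInfo::TCK_RecipThroughput);
  InstructionCost AddCost = TTI.getArithmeticReductionCost(
      Instruction::Add, VecTy, std::nullopt,
      TargetTransformInfo::TCK_RecipThroughput);
  errs() << "umin reduction: " << MinCost
         << ", add reduction: " << AddCost << "\n";
}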