1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
54#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <limits>
58#include <optional>
59#include <utility>
60
61namespace llvm {
62
63class Function;
64class GlobalValue;
65class LLVMContext;
66class ScalarEvolution;
67class SCEV;
68class TargetMachine;
69
70extern cl::opt<unsigned> PartialUnrollingThreshold;
71
72/// Base class which can be used to help build a TTI implementation.
73///
74/// This class provides as much implementation of the TTI interface as is
75/// possible using the target independent parts of the code generator.
76///
77/// In order to subclass it, your class must implement a getST() method to
78/// return the subtarget, and a getTLI() method to return the target lowering.
79/// We need these methods implemented in the derived class so that this class
80/// doesn't have to duplicate storage for them.
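///
/// For illustration only (this sketch is not part of the header): a derived
/// implementation for a hypothetical target "Foo" typically looks like the
/// following, mirroring how in-tree targets wire up getST()/getTLI():
///
/// \code
///   class FooTTIImpl final : public BasicTTIImplBase<FooTTIImpl> {
///     using BaseT = BasicTTIImplBase<FooTTIImpl>;
///     friend BaseT;
///
///     const FooSubtarget *ST;
///     const FooTargetLowering *TLI;
///
///     // Required by BasicTTIImplBase; no storage is duplicated in the base.
///     const TargetSubtargetInfo *getST() const { return ST; }
///     const TargetLoweringBase *getTLI() const { return TLI; }
///
///   public:
///     explicit FooTTIImpl(const FooTargetMachine *TM, const Function &F)
///         : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
///           TLI(ST->getTargetLowering()) {}
///   };
/// \endcode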
81template <typename T>
82class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
83private:
84 using BaseT = TargetTransformInfoImplCRTPBase<T>;
85 using TTI = TargetTransformInfo;
86
87 /// Helper function to access this as a T.
88 const T *thisT() const { return static_cast<const T *>(this); }
89
90 /// Estimate a cost of Broadcast as an extract and sequence of insert
91 /// operations.
92 InstructionCost
93 getBroadcastShuffleOverhead(FixedVectorType *VTy,
94 TTI::TargetCostKind CostKind) const {
95 InstructionCost Cost = 0;
96 // Broadcast cost is equal to the cost of extracting the zero'th element
97 // plus the cost of inserting it into every element of the result vector.
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
99 CostKind, 0, nullptr, nullptr);
100
101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
103 CostKind, i, nullptr, nullptr);
104 }
105 return Cost;
106 }
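// For example (illustrative unit costs): broadcasting a <4 x float> on a
// target where each vector extract and insert is costed at 1 is modelled by
// the helper above as 1 extract + 4 inserts = 5.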
107
108 /// Estimate a cost of shuffle as a sequence of extract and insert
109 /// operations.
110 InstructionCost
111 getPermuteShuffleOverhead(FixedVectorType *VTy,
112 TTI::TargetCostKind CostKind) const {
113 InstructionCost Cost = 0;
114 // Shuffle cost is equal to the cost of extracting each element from its
115 // source argument plus the cost of inserting them into the result vector.
116
117 // e.g. a <4 x float> shuffle with mask <0,5,2,7> needs to extract from
118 // index 0 of the first vector, index 1 of the second vector, index 2 of
119 // the first vector and index 3 of the second vector, and insert them at
120 // indices <0,1,2,3> of the result vector.
121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
123 CostKind, i, nullptr, nullptr);
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
125 CostKind, i, nullptr, nullptr);
126 }
127 return Cost;
128 }
129
130 /// Estimate a cost of subvector extraction as a sequence of extract and
131 /// insert operations.
132 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
133 TTI::TargetCostKind CostKind,
134 int Index,
135 FixedVectorType *SubVTy) const {
136 assert(VTy && SubVTy &&
137 "Can only extract subvectors from vectors");
138 int NumSubElts = SubVTy->getNumElements();
139 assert((!isa<FixedVectorType>(VTy) ||
140 (Index + NumSubElts) <=
141 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
142 "SK_ExtractSubvector index out of range");
143
144 InstructionCost Cost = 0;
145 // Subvector extraction cost is equal to the cost of extracting element from
146 // the source type plus the cost of inserting them into the result vector
147 // type.
148 for (int i = 0; i != NumSubElts; ++i) {
149 Cost +=
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
153 CostKind, i, nullptr, nullptr);
154 }
155 return Cost;
156 }
157
158 /// Estimate a cost of subvector insertion as a sequence of extract and
159 /// insert operations.
160 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
161 TTI::TargetCostKind CostKind,
162 int Index,
163 FixedVectorType *SubVTy) const {
164 assert(VTy && SubVTy &&
165 "Can only insert subvectors into vectors");
166 int NumSubElts = SubVTy->getNumElements();
167 assert((!isa<FixedVectorType>(VTy) ||
168 (Index + NumSubElts) <=
169 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
170 "SK_InsertSubvector index out of range");
171
172 InstructionCost Cost = 0;
173 // Subvector insertion cost is equal to the cost of extracting element from
174 // the source type plus the cost of inserting them into the result vector
175 // type.
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
178 CostKind, i, nullptr, nullptr);
179 Cost +=
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
182 }
183 return Cost;
184 }
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetSubtargetInfo *getST() const {
188 return static_cast<const T *>(this)->getST();
189 }
190
191 /// Local query method delegates up to T which *must* implement this!
192 const TargetLoweringBase *getTLI() const {
193 return static_cast<const T *>(this)->getTLI();
194 }
195
196 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
197 switch (M) {
198 case TTI::MIM_Unindexed:
199 return ISD::UNINDEXED;
200 case TTI::MIM_PreInc:
201 return ISD::PRE_INC;
202 case TTI::MIM_PreDec:
203 return ISD::PRE_DEC;
204 case TTI::MIM_PostInc:
205 return ISD::POST_INC;
206 case TTI::MIM_PostDec:
207 return ISD::POST_DEC;
208 }
209 llvm_unreachable("Unexpected MemIndexedMode");
210 }
211
212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
213 Align Alignment,
214 bool VariableMask,
215 bool IsGatherScatter,
216 TTI::TargetCostKind CostKind,
217 unsigned AddressSpace = 0) const {
218 // We cannot scalarize scalable vectors, so return Invalid.
219 if (isa<ScalableVectorType>(DataTy))
220 return InstructionCost::getInvalid();
221
222 auto *VT = cast<FixedVectorType>(DataTy);
223 unsigned VF = VT->getNumElements();
224
225 // Assume the target does not have support for gather/scatter operations
226 // and provide a rough estimate.
227 //
228 // First, compute the cost of the individual memory operations.
229 InstructionCost AddrExtractCost =
230 IsGatherScatter ? getScalarizationOverhead(
231 FixedVectorType::get(
232 PointerType::get(VT->getContext(), 0), VF),
233 /*Insert=*/false, /*Extract=*/true, CostKind)
234 : 0;
235
236 // The cost of the scalar loads/stores.
237 InstructionCost MemoryOpCost =
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
239 AddressSpace, CostKind);
240
241 // Next, compute the cost of packing the result in a vector.
242 InstructionCost PackingCost =
243 getScalarizationOverhead(VT, Opcode != Instruction::Store,
244 Opcode == Instruction::Store, CostKind);
245
246 InstructionCost ConditionalCost = 0;
247 if (VariableMask) {
248 // Compute the cost of conditionally executing the memory operations with
249 // variable masks. This includes extracting the individual conditions,
250 // the branches and the PHIs to combine the results.
251 // NOTE: Estimating the cost of conditionally executing the memory
252 // operations accurately is quite difficult and the current solution
253 // provides a very rough estimate only.
254 ConditionalCost =
255 getScalarizationOverhead(
256 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()), VF),
257 /*Insert=*/false, /*Extract=*/true, CostKind) +
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
260 }
261
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
263 }
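// Worked example for the helper above (assuming a unit cost for every
// component): a gather of <4 x i32> with a variable mask decomposes into
//   AddrExtractCost  = 4   (extract the four pointers)
//   MemoryOpCost     = 4   (four scalar i32 loads)
//   PackingCost      = 4   (insert the loaded values into the result)
//   ConditionalCost  = 4 + 4 * (br + phi) = 12
// for a total estimate of 24.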
264
265 /// Checks if the provided mask \p Mask is a splat mask, i.e. it contains only
266 /// -1 or the same non -1 index value, and that index value occurs at least twice.
267 /// So, mask <0, -1,-1, -1> is not considered splat (it is just identity),
268 /// same for <-1, 0, -1, -1> (just a slide), while <2, -1, 2, -1> is a splat
269 /// with \p Index=2.
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
271 // Check that the broadcast index appears at least twice.
272 bool IsCompared = false;
273 if (int SplatIdx = PoisonMaskElem;
274 all_of(enumerate(Mask), [&](const auto &P) {
275 if (P.value() == PoisonMaskElem)
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
278 return false;
279 if (SplatIdx == PoisonMaskElem) {
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
282 }
283 IsCompared = true;
284 return SplatIdx == P.value();
285 })) {
286 Index = SplatIdx;
287 return true;
288 }
289 return false;
290 }
291
292 /// Several intrinsics that return structs (including llvm.sincos[pi] and
293 /// llvm.modf) can be lowered to a vector library call (for certain VFs). The
294 /// vector library functions correspond to the scalar calls (e.g. sincos or
295 /// modf), which, unlike the intrinsic, return their values via output pointers. This
296 /// helper checks if a vector call exists for the given intrinsic, and returns
297 /// the cost, which includes the cost of the mask (if required), and the loads
298 /// for values returned via output pointers. \p LC is the scalar libcall and
299 /// \p CallRetElementIndex (optional) is the struct element which is mapped to
300 /// the call return value. If std::nullopt is returned, then no vector library
301 /// call is available, so the intrinsic should be assigned the default cost
302 /// (e.g. scalarization).
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
304 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
305 RTLIB::Libcall LC,
306 std::optional<unsigned> CallRetElementIndex = {}) const {
307 Type *RetTy = ICA.getReturnType();
308 // Vector variants of the intrinsic can be mapped to a vector library call.
309 auto const *LibInfo = ICA.getLibInfo();
310 if (!LibInfo || !isa<StructType>(RetTy) ||
312 return std::nullopt;
313
314 // Find associated libcall.
315 const char *LCName = getTLI()->getLibcallName(LC);
316 if (!LCName)
317 return std::nullopt;
318
319 // Search for a corresponding vector variant.
320 LLVMContext &Ctx = RetTy->getContext();
322 VecDesc const *VD = nullptr;
323 for (bool Masked : {false, true}) {
324 if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
325 break;
326 }
327 if (!VD)
328 return std::nullopt;
329
330 // Cost the call + mask.
331 auto Cost =
332 thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
333 if (VD->isMasked()) {
334 auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
335 Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
336 VecTy, {}, CostKind, 0, nullptr, {});
337 }
338
339 // Lowering to a library call (with output pointers) may require us to emit
340 // reloads for the results.
341 for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
342 if (Idx == CallRetElementIndex)
343 continue;
344 Cost += thisT()->getMemoryOpCost(
345 Instruction::Load, VectorTy,
346 thisT()->getDataLayout().getABITypeAlign(VectorTy), 0, CostKind);
347 }
348 return Cost;
349 }
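// For example (the vector mapping named here is hypothetical): costing a
// 4-wide llvm.sincos against a scalar "sincos" libcall that has a masked
// 4-wide vector variant would sum the call cost of that variant, a broadcast
// of the <4 x i1> mask, and one load per struct element that the library
// routine returns through an output pointer.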
350
351 /// Filter out constant and duplicated entries in \p Ops and return a vector
352 /// containing the types from \p Tys corresponding to the remaining operands.
354 filterConstantAndDuplicatedOperands(ArrayRef<const Value *> Ops,
355 ArrayRef<Type *> Tys) {
356 SmallPtrSet<const Value *, 4> UniqueOperands;
357 SmallVector<Type *, 4> FilteredTys;
358 for (const auto &[Op, Ty] : zip_equal(Ops, Tys)) {
359 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
360 continue;
361 FilteredTys.push_back(Ty);
362 }
363 return FilteredTys;
364 }
365
366protected:
367 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
368 : BaseT(DL) {}
369 ~BasicTTIImplBase() override = default;
370
372
373public:
374 /// \name Scalar TTI Implementations
375 /// @{
376 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
377 unsigned AddressSpace, Align Alignment,
378 unsigned *Fast) const override {
379 EVT E = EVT::getIntegerVT(Context, BitWidth);
380 return getTLI()->allowsMisalignedMemoryAccesses(
381 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
382 }
383
384 bool areInlineCompatible(const Function *Caller,
385 const Function *Callee) const override {
386 const TargetMachine &TM = getTLI()->getTargetMachine();
387
388 const FeatureBitset &CallerBits =
389 TM.getSubtargetImpl(*Caller)->getFeatureBits();
390 const FeatureBitset &CalleeBits =
391 TM.getSubtargetImpl(*Callee)->getFeatureBits();
392
393 // Inline a callee if its target-features are a subset of the caller's
394 // target-features.
395 return (CallerBits & CalleeBits) == CalleeBits;
396 }
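// For example, a caller compiled with {+avx2, +sse4.2} may inline a callee
// requiring only {+sse4.2}, since the callee bits are a subset of the caller
// bits; with the feature sets swapped the subset check above fails. (The
// feature names are illustrative; any FeatureBitset behaves the same way.)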
397
398 bool hasBranchDivergence(const Function *F = nullptr) const override {
399 return false;
400 }
401
402 bool isSourceOfDivergence(const Value *V) const override { return false; }
403
404 bool isAlwaysUniform(const Value *V) const override { return false; }
405
406 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
407 return false;
408 }
409
410 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
411 return true;
412 }
413
414 unsigned getFlatAddressSpace() const override {
415 // Return an invalid address space.
416 return -1;
417 }
418
419 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
420 Intrinsic::ID IID) const override {
421 return false;
422 }
423
424 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
425 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
426 }
427
428 unsigned getAssumedAddrSpace(const Value *V) const override {
429 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
430 }
431
432 bool isSingleThreaded() const override {
433 return getTLI()->getTargetMachine().Options.ThreadModel ==
434 ThreadModel::Single;
435 }
436
437 std::pair<const Value *, unsigned>
438 getPredicatedAddrSpace(const Value *V) const override {
439 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
440 }
441
442 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
443 Value *NewV) const override {
444 return nullptr;
445 }
446
447 bool isLegalAddImmediate(int64_t imm) const override {
448 return getTLI()->isLegalAddImmediate(imm);
449 }
450
451 bool isLegalAddScalableImmediate(int64_t Imm) const override {
452 return getTLI()->isLegalAddScalableImmediate(Imm);
453 }
454
455 bool isLegalICmpImmediate(int64_t imm) const override {
456 return getTLI()->isLegalICmpImmediate(imm);
457 }
458
459 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
460 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
461 Instruction *I = nullptr,
462 int64_t ScalableOffset = 0) const override {
463 TargetLoweringBase::AddrMode AM;
464 AM.BaseGV = BaseGV;
465 AM.BaseOffs = BaseOffset;
466 AM.HasBaseReg = HasBaseReg;
467 AM.Scale = Scale;
468 AM.ScalableOffset = ScalableOffset;
469 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
470 }
471
472 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
473 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
474 }
475
476 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
477 Type *ScalarValTy) const override {
478 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
479 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
480 EVT VT = getTLI()->getValueType(DL, SrcTy);
481 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
482 getTLI()->isOperationCustom(ISD::STORE, VT))
483 return true;
484
485 EVT ValVT =
486 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
487 EVT LegalizedVT =
488 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
489 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
490 };
491 while (VF > 2 && IsSupportedByTarget(VF))
492 VF /= 2;
493 return VF;
494 }
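// Worked example for getStoreMinimumVF() above (types are illustrative):
// starting at VF = 8 with i32 elements on a target whose widest legal vector
// store is v4i32, the check for VF/2 = 4 succeeds, so VF becomes 4; the check
// for VF/2 = 2 fails (no legal/custom v2i32 store and no legal truncating
// store), so 4 is returned as the minimum profitable store VF.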
495
496 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
497 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
498 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
499 }
500
501 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
502 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
503 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
504 }
505
507 const TTI::LSRCost &C2) const override {
509 }
510
514
518
522
523 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
524 StackOffset BaseOffset, bool HasBaseReg,
525 int64_t Scale,
526 unsigned AddrSpace) const override {
527 TargetLoweringBase::AddrMode AM;
528 AM.BaseGV = BaseGV;
529 AM.BaseOffs = BaseOffset.getFixed();
530 AM.HasBaseReg = HasBaseReg;
531 AM.Scale = Scale;
532 AM.ScalableOffset = BaseOffset.getScalable();
533 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
534 return 0;
535 return InstructionCost::getInvalid();
536 }
537
538 bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
539 return getTLI()->isTruncateFree(Ty1, Ty2);
540 }
541
542 bool isProfitableToHoist(Instruction *I) const override {
543 return getTLI()->isProfitableToHoist(I);
544 }
545
546 bool useAA() const override { return getST()->useAA(); }
547
548 bool isTypeLegal(Type *Ty) const override {
549 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
550 return getTLI()->isTypeLegal(VT);
551 }
552
553 unsigned getRegUsageForType(Type *Ty) const override {
554 EVT ETy = getTLI()->getValueType(DL, Ty);
555 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
556 }
557
559 ArrayRef<const Value *> Operands, Type *AccessType,
560 TTI::TargetCostKind CostKind) const override {
561 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
562 }
563
564 unsigned getEstimatedNumberOfCaseClusters(
565 const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI,
566 BlockFrequencyInfo *BFI) const override {
567 /// Try to find the estimated number of clusters. Note that the number of
568 /// clusters identified in this function could be different from the actual
569 /// numbers found in lowering. This function ignores switches that are
570 /// lowered with a mix of jump table / bit test / BTree. It was initially
571 /// intended for estimating the cost of a switch in the inline cost
572 /// heuristic, but it is a generic cost model to be used in other
573 /// places (e.g., in loop unrolling).
574 unsigned N = SI.getNumCases();
575 const TargetLoweringBase *TLI = getTLI();
576 const DataLayout &DL = this->getDataLayout();
577
578 JumpTableSize = 0;
579 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
580
581 // Early exit if both a jump table and bit test are not allowed.
582 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
583 return N;
584
585 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
586 APInt MinCaseVal = MaxCaseVal;
587 for (auto CI : SI.cases()) {
588 const APInt &CaseVal = CI.getCaseValue()->getValue();
589 if (CaseVal.sgt(MaxCaseVal))
590 MaxCaseVal = CaseVal;
591 if (CaseVal.slt(MinCaseVal))
592 MinCaseVal = CaseVal;
593 }
594
595 // Check if suitable for a bit test
596 if (N <= DL.getIndexSizeInBits(0u)) {
598 for (auto I : SI.cases()) {
599 const BasicBlock *BB = I.getCaseSuccessor();
600 ++DestMap[BB];
601 }
602
603 if (TLI->isSuitableForBitTests(DestMap, MinCaseVal, MaxCaseVal, DL))
604 return 1;
605 }
606
607 // Check if suitable for a jump table.
608 if (IsJTAllowed) {
609 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
610 return N;
611 uint64_t Range =
612 (MaxCaseVal - MinCaseVal)
613 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
614 // Check whether a range of clusters is dense enough for a jump table
615 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
616 JumpTableSize = Range;
617 return 1;
618 }
619 }
620 return N;
621 }
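// Worked example for the function above: a switch whose 100 cases densely
// cover [0, 99] yields Range = 100; if jump tables are allowed and
// isSuitableForJumpTable() accepts that density, the switch is reported as a
// single cluster with JumpTableSize = 100. A switch whose case values all fit
// in one index-sized word and that passes isSuitableForBitTests() is likewise
// reported as a single cluster; otherwise N clusters are assumed.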
622
623 bool shouldBuildLookupTables() const override {
624 const TargetLoweringBase *TLI = getTLI();
625 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
626 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
627 }
628
629 bool shouldBuildRelLookupTables() const override {
630 const TargetMachine &TM = getTLI()->getTargetMachine();
631 // If non-PIC mode, do not generate a relative lookup table.
632 if (!TM.isPositionIndependent())
633 return false;
634
635 /// Relative lookup table entries consist of 32-bit offsets.
636 /// Do not generate relative lookup tables for large code models
637 /// on 64-bit architectures where 32-bit offsets might not be enough.
638 if (TM.getCodeModel() == CodeModel::Medium ||
639 TM.getCodeModel() == CodeModel::Large)
640 return false;
641
642 const Triple &TargetTriple = TM.getTargetTriple();
643 if (!TargetTriple.isArch64Bit())
644 return false;
645
646 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
647 // there.
648 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
649 return false;
650
651 return true;
652 }
653
654 bool haveFastSqrt(Type *Ty) const override {
655 const TargetLoweringBase *TLI = getTLI();
656 EVT VT = TLI->getValueType(DL, Ty);
657 return TLI->isTypeLegal(VT) &&
658 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
659 }
660
661 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override { return true; }
662
663 InstructionCost getFPOpCost(Type *Ty) const override {
664 // Check whether FADD is available, as a proxy for floating-point in
665 // general.
666 const TargetLoweringBase *TLI = getTLI();
667 EVT VT = TLI->getValueType(DL, Ty);
668 return TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT)
669 ? TargetTransformInfo::TCC_Basic
670 : TargetTransformInfo::TCC_Expensive;
671 }
672
673 bool preferToKeepConstantsAttached(const Instruction &Inst,
674 const Function &Fn) const override {
675 switch (Inst.getOpcode()) {
676 default:
677 break;
678 case Instruction::SDiv:
679 case Instruction::SRem:
680 case Instruction::UDiv:
681 case Instruction::URem: {
682 if (!isa<ConstantInt>(Inst.getOperand(1)))
683 return false;
684 EVT VT = getTLI()->getValueType(DL, Inst.getType());
685 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
686 }
687 };
688
689 return false;
690 }
691
692 unsigned getInliningThresholdMultiplier() const override { return 1; }
693 unsigned adjustInliningThreshold(const CallBase *CB) const override {
694 return 0;
695 }
696 unsigned getCallerAllocaCost(const CallBase *CB,
697 const AllocaInst *AI) const override {
698 return 0;
699 }
700
701 int getInlinerVectorBonusPercent() const override { return 150; }
702
703 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
704 TTI::UnrollingPreferences &UP,
705 OptimizationRemarkEmitter *ORE) const override {
706 // This unrolling functionality is target independent, but to provide some
707 // motivation for its intended use, for x86:
708
709 // According to the Intel 64 and IA-32 Architectures Optimization Reference
710 // Manual, Intel Core models and later have a loop stream detector (and
711 // associated uop queue) that can benefit from partial unrolling.
712 // The relevant requirements are:
713 // - The loop must have no more than 4 (8 for Nehalem and later) branches
714 // taken, and none of them may be calls.
715 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
716
717 // According to the Software Optimization Guide for AMD Family 15h
718 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
719 // and loop buffer which can benefit from partial unrolling.
720 // The relevant requirements are:
721 // - The loop must have fewer than 16 branches
722 // - The loop must have less than 40 uops in all executed loop branches
723
724 // The number of taken branches in a loop is hard to estimate here, and
725 // benchmarking has revealed that it is better not to be conservative when
726 // estimating the branch count. As a result, we'll ignore the branch limits
727 // until someone finds a case where it matters in practice.
728
729 unsigned MaxOps;
730 const TargetSubtargetInfo *ST = getST();
731 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
732 MaxOps = PartialUnrollingThreshold;
733 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
734 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
735 else
736 return;
737
738 // Scan the loop: don't unroll loops with calls.
739 for (BasicBlock *BB : L->blocks()) {
740 for (Instruction &I : *BB) {
741 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
742 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
743 if (!thisT()->isLoweredToCall(F))
744 continue;
745 }
746
747 if (ORE) {
748 ORE->emit([&]() {
749 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
750 L->getHeader())
751 << "advising against unrolling the loop because it "
752 "contains a "
753 << ore::NV("Call", &I);
754 });
755 }
756 return;
757 }
758 }
759 }
760
761 // Enable runtime and partial unrolling up to the specified size.
762 // Enable using trip count upper bound to unroll loops.
763 UP.Partial = UP.Runtime = UP.UpperBound = true;
764 UP.PartialThreshold = MaxOps;
765
766 // Avoid unrolling when optimizing for size.
767 UP.OptSizeThreshold = 0;
768 UP.PartialOptSizeThreshold = 0;
769
770 // Set number of instructions optimized when "back edge"
771 // becomes "fall through" to default value of 2.
772 UP.BEInsns = 2;
773 }
774
775 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
776 TTI::PeelingPreferences &PP) const override {
777 PP.PeelCount = 0;
778 PP.AllowPeeling = true;
779 PP.AllowLoopNestsPeeling = false;
780 PP.PeelProfiledIterations = true;
781 }
782
783 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
784 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
785 HardwareLoopInfo &HWLoopInfo) const override {
786 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
787 }
788
789 unsigned getEpilogueVectorizationMinVF() const override {
790 return BaseT::getEpilogueVectorizationMinVF();
791 }
792
793 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
794 return BaseT::preferPredicateOverEpilogue(TFI);
795 }
796
798 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override {
799 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
800 }
801
802 std::optional<Instruction *>
803 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
804 return BaseT::instCombineIntrinsic(IC, II);
805 }
806
807 std::optional<Value *>
808 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
809 APInt DemandedMask, KnownBits &Known,
810 bool &KnownBitsComputed) const override {
811 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
812 KnownBitsComputed);
813 }
814
815 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
816 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
817 APInt &UndefElts2, APInt &UndefElts3,
818 std::function<void(Instruction *, unsigned, APInt, APInt &)>
819 SimplifyAndSetOp) const override {
820 return BaseT::simplifyDemandedVectorEltsIntrinsic(
821 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
822 SimplifyAndSetOp);
823 }
824
825 std::optional<unsigned>
826 getCacheSize(TargetTransformInfo::CacheLevel Level) const override {
827 return std::optional<unsigned>(
828 getST()->getCacheSize(static_cast<unsigned>(Level)));
829 }
830
831 std::optional<unsigned>
832 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override {
833 std::optional<unsigned> TargetResult =
834 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
835
836 if (TargetResult)
837 return TargetResult;
838
839 return BaseT::getCacheAssociativity(Level);
840 }
841
842 unsigned getCacheLineSize() const override {
843 return getST()->getCacheLineSize();
844 }
845
846 unsigned getPrefetchDistance() const override {
847 return getST()->getPrefetchDistance();
848 }
849
850 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
851 unsigned NumStridedMemAccesses,
852 unsigned NumPrefetches,
853 bool HasCall) const override {
854 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
855 NumPrefetches, HasCall);
856 }
857
858 unsigned getMaxPrefetchIterationsAhead() const override {
859 return getST()->getMaxPrefetchIterationsAhead();
860 }
861
862 bool enableWritePrefetching() const override {
863 return getST()->enableWritePrefetching();
864 }
865
866 bool shouldPrefetchAddressSpace(unsigned AS) const override {
867 return getST()->shouldPrefetchAddressSpace(AS);
868 }
869
870 /// @}
871
872 /// \name Vector TTI Implementations
873 /// @{
874
879
880 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
881 std::optional<unsigned> getVScaleForTuning() const override {
882 return std::nullopt;
883 }
884 bool isVScaleKnownToBeAPowerOfTwo() const override { return false; }
885
886 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
887 /// are set if the demanded result elements need to be inserted and/or
888 /// extracted from vectors.
889 InstructionCost getScalarizationOverhead(
890 VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
891 TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
892 ArrayRef<Value *> VL = {}) const override {
893 /// FIXME: a bitfield is not a reasonable abstraction for talking about
894 /// which elements are needed from a scalable vector
895 if (isa<ScalableVectorType>(InTy))
896 return InstructionCost::getInvalid();
897 auto *Ty = cast<FixedVectorType>(InTy);
898
899 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
900 (VL.empty() || VL.size() == Ty->getNumElements()) &&
901 "Vector size mismatch");
902
903 InstructionCost Cost = 0;
904
905 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
906 if (!DemandedElts[i])
907 continue;
908 if (Insert) {
909 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
910 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
911 CostKind, i, nullptr, InsertedVal);
912 }
913 if (Extract)
914 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
915 CostKind, i, nullptr, nullptr);
916 }
917
918 return Cost;
919 }
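// For example, scalarizing a <4 x i32> value with DemandedElts = 0b0101 and
// both Insert and Extract set is costed as two inserts plus two extracts
// (elements 0 and 2 only); with unit per-element costs that gives 4, and the
// undemanded lanes contribute nothing.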
920
922 return false;
923 }
924
925 bool
927 unsigned ScalarOpdIdx) const override {
928 return false;
929 }
930
932 int OpdIdx) const override {
933 return OpdIdx == -1;
934 }
935
936 bool
938 int RetIdx) const override {
939 return RetIdx == 0;
940 }
941
942 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
943 InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
944 bool Extract,
945 TTI::TargetCostKind CostKind) const {
946 if (isa<ScalableVectorType>(InTy))
947 return InstructionCost::getInvalid();
948 auto *Ty = cast<FixedVectorType>(InTy);
949
950 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
951 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
952 CostKind);
953 }
954
955 /// Estimate the overhead of scalarizing an instruction's
956 /// operands. The (potentially vector) types to use for each argument are
957 /// passed via Tys.
958 InstructionCost getOperandsScalarizationOverhead(
959 ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind) const override {
960 InstructionCost Cost = 0;
961 for (Type *Ty : Tys) {
962 // Disregard things like metadata arguments.
963 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
964 !Ty->isPtrOrPtrVectorTy())
965 continue;
966
967 if (auto *VecTy = dyn_cast<VectorType>(Ty))
968 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
969 /*Extract*/ true, CostKind);
970 }
971
972 return Cost;
973 }
974
975 /// Estimate the overhead of scalarizing the inputs and outputs of an
976 /// instruction, with return type RetTy and arguments Args of type Tys. If
977 /// Args are unknown (empty), then the cost associated with one argument is
978 /// added as a heuristic.
984 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
985 if (!Args.empty())
987 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
988 else
989 // When no information on arguments is provided, we add the cost
990 // associated with one argument as a heuristic.
991 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
992 /*Extract*/ true, CostKind);
993
994 return Cost;
995 }
996
997 /// Estimate the cost of type-legalization and the legalized type.
998 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
999 LLVMContext &C = Ty->getContext();
1000 EVT MTy = getTLI()->getValueType(DL, Ty);
1001
1002 InstructionCost Cost = 1;
1003 // We keep legalizing the type until we find a legal kind. We assume that
1004 // the only operation that costs anything is the split. After splitting
1005 // we need to handle two types.
1006 while (true) {
1007 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
1008
1009 if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
1010 // Ensure we return a sensible simple VT here, since many callers of
1011 // this function require it.
1012 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
1013 return std::make_pair(InstructionCost::getInvalid(), VT);
1014 }
1015
1016 if (LK.first == TargetLoweringBase::TypeLegal)
1017 return std::make_pair(Cost, MTy.getSimpleVT());
1018
1019 if (LK.first == TargetLoweringBase::TypeSplitVector ||
1020 LK.first == TargetLoweringBase::TypeExpandInteger)
1021 Cost *= 2;
1022
1023 // Do not loop with f128 type.
1024 if (MTy == LK.second)
1025 return std::make_pair(Cost, MTy.getSimpleVT());
1026
1027 // Keep legalizing the type.
1028 MTy = LK.second;
1029 }
1030 }
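// Worked example for getTypeLegalizationCost() above (vector widths are
// illustrative): on a target whose widest legal vector register is 128 bits,
// a <16 x i32> value is split twice (v16i32 -> 2 x v8i32 -> 4 x v4i32), so
// the returned cost is 1 * 2 * 2 = 4 with MVT::v4i32 as the legalized type.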
1031
1032 unsigned getMaxInterleaveFactor(ElementCount VF) const override { return 1; }
1033
1034 InstructionCost getArithmeticInstrCost(
1035 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1036 TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
1037 TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
1038 ArrayRef<const Value *> Args = {},
1039 const Instruction *CxtI = nullptr) const override {
1040 // Check if any of the operands are vector operands.
1041 const TargetLoweringBase *TLI = getTLI();
1042 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1043 assert(ISD && "Invalid opcode");
1044
1045 // TODO: Handle more cost kinds.
1046 if (CostKind != TTI::TCK_RecipThroughput)
1047 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1048 Opd1Info, Opd2Info,
1049 Args, CxtI);
1050
1051 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1052
1053 bool IsFloat = Ty->isFPOrFPVectorTy();
1054 // Assume that floating point arithmetic operations cost twice as much as
1055 // integer operations.
1056 InstructionCost OpCost = (IsFloat ? 2 : 1);
1057
1058 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1059 // The operation is legal. Assume it costs 1.
1060 // TODO: Once we have extract/insert subvector cost we need to use them.
1061 return LT.first * OpCost;
1062 }
1063
1064 if (!TLI->isOperationExpand(ISD, LT.second)) {
1065 // If the operation is custom lowered, then assume that the code is twice
1066 // as expensive.
1067 return LT.first * 2 * OpCost;
1068 }
1069
1070 // An 'Expand' of URem and SRem is special because it may default
1071 // to expanding the operation into a sequence of sub-operations
1072 // i.e. X % Y -> X-(X/Y)*Y.
1073 if (ISD == ISD::UREM || ISD == ISD::SREM) {
1074 bool IsSigned = ISD == ISD::SREM;
1075 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
1076 LT.second) ||
1077 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
1078 LT.second)) {
1079 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1080 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
1081 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1082 InstructionCost MulCost =
1083 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1084 InstructionCost SubCost =
1085 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1086 return DivCost + MulCost + SubCost;
1087 }
1088 }
1089
1090 // We cannot scalarize scalable vectors, so return Invalid.
1091 if (isa<ScalableVectorType>(Ty))
1092 return InstructionCost::getInvalid();
1093
1094 // Else, assume that we need to scalarize this op.
1095 // TODO: If one of the types get legalized by splitting, handle this
1096 // similarly to what getCastInstrCost() does.
1097 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1098 InstructionCost Cost = thisT()->getArithmeticInstrCost(
1099 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
1100 Args, CxtI);
1101 // Return the cost of multiple scalar invocation plus the cost of
1102 // inserting and extracting the values.
1103 SmallVector<Type *> Tys(Args.size(), Ty);
1104 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1105 VTy->getNumElements() * Cost;
1106 }
1107
1108 // We don't know anything about this scalar instruction.
1109 return OpCost;
1110 }
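// For example, a urem on a type whose UREM is expanded but whose UDIV is
// legal is costed through the X - (X / Y) * Y decomposition above, i.e. as
// the sum of one udiv, one mul and one sub of that type, instead of falling
// through to full scalarization.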
1111
1112 static TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
1113 ArrayRef<int> Mask,
1114 VectorType *SrcTy, int &Index,
1115 VectorType *&SubTy) const {
1116 if (Mask.empty())
1117 return Kind;
1118 int NumDstElts = Mask.size();
1119 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1120 switch (Kind) {
1121 case TTI::SK_PermuteSingleSrc: {
1122 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
1123 return TTI::SK_Reverse;
1124 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
1125 return TTI::SK_Broadcast;
1126 if (isSplatMask(Mask, NumSrcElts, Index))
1127 return TTI::SK_Broadcast;
1128 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
1129 (Index + NumDstElts) <= NumSrcElts) {
1130 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
1131 return TTI::SK_ExtractSubvector;
1132 }
1133 break;
1134 }
1135 case TTI::SK_PermuteTwoSrc: {
1136 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1137 return improveShuffleKindFromMask(TTI::SK_PermuteSingleSrc, Mask, SrcTy,
1138 Index, SubTy);
1139 int NumSubElts;
1140 if (NumDstElts > 2 && ShuffleVectorInst::isInsertSubvectorMask(
1141 Mask, NumSrcElts, NumSubElts, Index)) {
1142 if (Index + NumSubElts > NumSrcElts)
1143 return Kind;
1144 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
1145 return TTI::SK_InsertSubvector;
1146 }
1147 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
1148 return TTI::SK_Select;
1149 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1150 return TTI::SK_Transpose;
1151 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1152 return TTI::SK_Splice;
1153 break;
1154 }
1155 case TTI::SK_Select:
1156 case TTI::SK_Reverse:
1157 case TTI::SK_Broadcast:
1158 case TTI::SK_Transpose:
1161 case TTI::SK_Splice:
1162 break;
1163 }
1164 return Kind;
1165 }
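// For example, with four source elements the single-source mask <1,1,1,1> is
// refined to SK_Broadcast (Index = 1), <3,2,1,0> to SK_Reverse, and <0,1> to
// SK_ExtractSubvector of a two-element subvector at Index 0; the two-source
// mask <0,5,2,7>, which picks lane i from either input, is recognized as
// SK_Select.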
1166
1170 VectorType *SubTp, ArrayRef<const Value *> Args = {},
1171 const Instruction *CxtI = nullptr) const override {
1172 switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
1173 case TTI::SK_Broadcast:
1174 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1175 return getBroadcastShuffleOverhead(FVT, CostKind);
1176 return InstructionCost::getInvalid();
1177 case TTI::SK_Select:
1178 case TTI::SK_Splice:
1179 case TTI::SK_Reverse:
1180 case TTI::SK_Transpose:
1181 case TTI::SK_PermuteSingleSrc:
1182 case TTI::SK_PermuteTwoSrc:
1183 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1184 return getPermuteShuffleOverhead(FVT, CostKind);
1185 return InstructionCost::getInvalid();
1186 case TTI::SK_ExtractSubvector:
1187 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1188 cast<FixedVectorType>(SubTp));
1189 case TTI::SK_InsertSubvector:
1190 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1191 cast<FixedVectorType>(SubTp));
1192 }
1193 llvm_unreachable("Unknown TTI::ShuffleKind");
1194 }
1195
1196 InstructionCost
1197 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1198 TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
1199 const Instruction *I = nullptr) const override {
1200 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1201 return 0;
1202
1203 const TargetLoweringBase *TLI = getTLI();
1204 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1205 assert(ISD && "Invalid opcode");
1206 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1207 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1208
1209 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1210 TypeSize DstSize = DstLT.second.getSizeInBits();
1211 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1212 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1213
1214 switch (Opcode) {
1215 default:
1216 break;
1217 case Instruction::Trunc:
1218 // Check for NOOP conversions.
1219 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1220 return 0;
1221 [[fallthrough]];
1222 case Instruction::BitCast:
1223 // Bitcasts between types that are legalized to the same type are free;
1224 // assume int to/from ptr of the same size is also free.
1225 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1226 SrcSize == DstSize)
1227 return 0;
1228 break;
1229 case Instruction::FPExt:
1230 if (I && getTLI()->isExtFree(I))
1231 return 0;
1232 break;
1233 case Instruction::ZExt:
1234 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1235 return 0;
1236 [[fallthrough]];
1237 case Instruction::SExt:
1238 if (I && getTLI()->isExtFree(I))
1239 return 0;
1240
1241 // If this is a zext/sext of a load, return 0 if the corresponding
1242 // extending load exists on target and the result type is legal.
1243 if (CCH == TTI::CastContextHint::Normal) {
1244 EVT ExtVT = EVT::getEVT(Dst);
1245 EVT LoadVT = EVT::getEVT(Src);
1246 unsigned LType =
1247 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1248 if (DstLT.first == SrcLT.first &&
1249 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1250 return 0;
1251 }
1252 break;
1253 case Instruction::AddrSpaceCast:
1254 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1255 Dst->getPointerAddressSpace()))
1256 return 0;
1257 break;
1258 }
1259
1260 auto *SrcVTy = dyn_cast<VectorType>(Src);
1261 auto *DstVTy = dyn_cast<VectorType>(Dst);
1262
1263 // If the cast is marked as legal (or promote) then assume low cost.
1264 if (SrcLT.first == DstLT.first &&
1265 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1266 return SrcLT.first;
1267
1268 // Handle scalar conversions.
1269 if (!SrcVTy && !DstVTy) {
1270 // Just check the op cost. If the operation is legal then assume it costs
1271 // 1.
1272 if (!TLI->isOperationExpand(ISD, DstLT.second))
1273 return 1;
1274
1275 // Assume that illegal scalar instructions are expensive.
1276 return 4;
1277 }
1278
1279 // Check vector-to-vector casts.
1280 if (DstVTy && SrcVTy) {
1281 // If the cast is between same-sized registers, then the check is simple.
1282 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1283
1284 // Assume that Zext is done using AND.
1285 if (Opcode == Instruction::ZExt)
1286 return SrcLT.first;
1287
1288 // Assume that sext is done using SHL and SRA.
1289 if (Opcode == Instruction::SExt)
1290 return SrcLT.first * 2;
1291
1292 // Just check the op cost. If the operation is legal then assume it
1293 // costs 1 and multiply the result by the type-legalization
1294 // overhead.
1295 if (!TLI->isOperationExpand(ISD, DstLT.second))
1296 return SrcLT.first * 1;
1297 }
1298
1299 // If we are legalizing by splitting, query the concrete TTI for the cost
1300 // of casting the original vector twice. We also need to factor in the
1301 // cost of the split itself. Count that as 1, to be consistent with
1302 // getTypeLegalizationCost().
1303 bool SplitSrc =
1304 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1305 TargetLowering::TypeSplitVector;
1306 bool SplitDst =
1307 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1308 TargetLowering::TypeSplitVector;
1309 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1310 DstVTy->getElementCount().isVector()) {
1311 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1312 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1313 const T *TTI = thisT();
1314 // If both types need to be split then the split is free.
1315 InstructionCost SplitCost =
1316 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1317 return SplitCost +
1318 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1319 CostKind, I));
1320 }
1321
1322 // Scalarization cost is Invalid, can't assume any num elements.
1323 if (isa<ScalableVectorType>(DstVTy))
1324 return InstructionCost::getInvalid();
1325
1326 // In other cases where the source or destination are illegal, assume
1327 // the operation will get scalarized.
1328 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1329 InstructionCost Cost = thisT()->getCastInstrCost(
1330 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1331
1332 // Return the cost of multiple scalar invocation plus the cost of
1333 // inserting and extracting the values.
1334 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1335 CostKind) +
1336 Num * Cost;
1337 }
1338
1339 // We already handled vector-to-vector and scalar-to-scalar conversions.
1340 // This is where we handle bitcasts between vectors and scalars. We
1341 // need to assume that the conversion is scalarized in one way or
1342 // another.
1343 if (Opcode == Instruction::BitCast) {
1344 // Illegal bitcasts are done by storing and loading from a stack slot.
1345 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1346 /*Extract*/ true, CostKind)
1347 : 0) +
1348 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1349 /*Extract*/ false, CostKind)
1350 : 0);
1351 }
1352
1353 llvm_unreachable("Unhandled cast");
1354 }
1355
1357 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1358 unsigned Index,
1359 TTI::TargetCostKind CostKind) const override {
1360 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1361 CostKind, Index, nullptr, nullptr) +
1362 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1364 }
1365
1368 const Instruction *I = nullptr) const override {
1369 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1370 }
1371
1373 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1377 const Instruction *I = nullptr) const override {
1378 const TargetLoweringBase *TLI = getTLI();
1379 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1380 assert(ISD && "Invalid opcode");
1381
1382 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)
1383 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1384 Op1Info, Op2Info, I);
1385
1386 // Selects on vectors are actually vector selects.
1387 if (ISD == ISD::SELECT) {
1388 assert(CondTy && "CondTy must exist");
1389 if (CondTy->isVectorTy())
1390 ISD = ISD::VSELECT;
1391 }
1392 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1393
1394 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1395 !TLI->isOperationExpand(ISD, LT.second)) {
1396 // The operation is legal. Assume it costs 1. Multiply
1397 // by the type-legalization overhead.
1398 return LT.first * 1;
1399 }
1400
1401 // Otherwise, assume that the cast is scalarized.
1402 // TODO: If one of the types get legalized by splitting, handle this
1403 // similarly to what getCastInstrCost() does.
1404 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1405 if (isa<ScalableVectorType>(ValTy))
1406 return InstructionCost::getInvalid();
1407
1408 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1409 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1410 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1411 CostKind, Op1Info, Op2Info, I);
1412
1413 // Return the cost of multiple scalar invocation plus the cost of
1414 // inserting and extracting the values.
1415 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1416 /*Extract*/ false, CostKind) +
1417 Num * Cost;
1418 }
1419
1420 // Unknown scalar opcode.
1421 return 1;
1422 }
1423
1426 unsigned Index, const Value *Op0,
1427 const Value *Op1) const override {
1428 return getRegUsageForType(Val->getScalarType());
1429 }
1430
1431 /// \param ScalarUserAndIdx encodes the information about extracts from a
1432 /// vector with 'Scalar' being the value being extracted,'User' being the user
1433 /// of the extract(nullptr if user is not known before vectorization) and
1434 /// 'Idx' being the extract lane.
1437 unsigned Index, Value *Scalar,
1438 ArrayRef<std::tuple<Value *, User *, int>>
1439 ScalarUserAndIdx) const override {
1440 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
1441 nullptr);
1442 }
1443
1446 unsigned Index) const override {
1447 Value *Op0 = nullptr;
1448 Value *Op1 = nullptr;
1449 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1450 Op0 = IE->getOperand(0);
1451 Op1 = IE->getOperand(1);
1452 }
1453 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1454 Op1);
1455 }
1456
1460 unsigned Index) const override {
1461 unsigned NewIndex = -1;
1462 if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
1463 assert(Index < FVTy->getNumElements() &&
1464 "Unexpected index from end of vector");
1465 NewIndex = FVTy->getNumElements() - 1 - Index;
1466 }
1467 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1468 nullptr);
1469 }
1470
1472 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
1473 const APInt &DemandedDstElts,
1474 TTI::TargetCostKind CostKind) const override {
1475 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1476 "Unexpected size of DemandedDstElts.");
1477
1478 InstructionCost Cost;
1479
1480 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1481 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1482
1483 // The Mask shuffling cost is extract all the elements of the Mask
1484 // and insert each of them Factor times into the wide vector:
1485 //
1486 // E.g. an interleaved group with factor 3:
1487 // %mask = icmp ult <8 x i32> %vec1, %vec2
1488 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1489 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1490 // The cost is estimated as extract all mask elements from the <8xi1> mask
1491 // vector and insert them factor times into the <24xi1> shuffled mask
1492 // vector.
1493 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1494 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1495 /*Insert*/ false,
1496 /*Extract*/ true, CostKind);
1497 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1498 /*Insert*/ true,
1499 /*Extract*/ false, CostKind);
1500
1501 return Cost;
1502 }
1503
1505 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1508 const Instruction *I = nullptr) const override {
1509 assert(!Src->isVoidTy() && "Invalid type");
1510 // Assume types, such as structs, are expensive.
1511 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1512 return 4;
1513 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1514
1515 // Assuming that all loads of legal types cost 1.
1516 InstructionCost Cost = LT.first;
1517 if (CostKind != TTI::TCK_RecipThroughput)
1518 return Cost;
1519
1520 const DataLayout &DL = this->getDataLayout();
1521 if (Src->isVectorTy() &&
1522 // In practice it's not currently possible to have a change in lane
1523 // length for extending loads or truncating stores so both types should
1524 // have the same scalable property.
1525 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1526 LT.second.getSizeInBits())) {
1527 // This is a vector load that legalizes to a larger type than the vector
1528 // itself. Unless the corresponding extending load or truncating store is
1529 // legal, then this will scalarize.
1530 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
1531 EVT MemVT = getTLI()->getValueType(DL, Src);
1532 if (Opcode == Instruction::Store)
1533 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1534 else
1535 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1536
1537 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1538 // This is a vector load/store for some illegal type that is scalarized.
1539 // We must account for the cost of building or decomposing the vector.
1540 Cost += getScalarizationOverhead(
1541 cast<VectorType>(Src), Opcode != Instruction::Store,
1542 Opcode == Instruction::Store, CostKind);
1543 }
1544 }
1545
1546 return Cost;
1547 }
1548
1550 getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment,
1551 unsigned AddressSpace,
1552 TTI::TargetCostKind CostKind) const override {
1553 // TODO: Pass on AddressSpace when we have test coverage.
1554 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1555 CostKind);
1556 }
1557
1559 getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
1560 bool VariableMask, Align Alignment,
1562 const Instruction *I = nullptr) const override {
1563 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1564 true, CostKind);
1565 }
1566
1568 getExpandCompressMemoryOpCost(unsigned Opcode, Type *DataTy,
1569 bool VariableMask, Align Alignment,
1571 const Instruction *I = nullptr) const override {
1572 // Treat expand load/compress store as gather/scatter operation.
1573 // TODO: implement more precise cost estimation for these intrinsics.
1574 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1575 /*IsGatherScatter*/ true, CostKind);
1576 }
1577
1579 const Value *Ptr, bool VariableMask,
1580 Align Alignment,
1582 const Instruction *I) const override {
1583 // For a target without strided memory operations (or for an illegal
1584 // operation type on one which does), assume we lower to a gather/scatter
1585 // operation. (Which may in turn be scalarized.)
1586 return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1587 Alignment, CostKind, I);
1588 }
1589
1591 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1592 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1593 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1594
1595 // We cannot scalarize scalable vectors, so return Invalid.
1596 if (isa<ScalableVectorType>(VecTy))
1598
1599 auto *VT = cast<FixedVectorType>(VecTy);
1600
1601 unsigned NumElts = VT->getNumElements();
1602 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1603
1604 unsigned NumSubElts = NumElts / Factor;
1605 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1606
1607 // Firstly, the cost of load/store operation.
1608 InstructionCost Cost;
1609 if (UseMaskForCond || UseMaskForGaps)
1610 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1611 AddressSpace, CostKind);
1612 else
1613 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1614 CostKind);
1615
1616 // Legalize the vector type, and get the legalized and unlegalized type
1617 // sizes.
1618 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1619 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1620 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1621
1622 // Scale the cost of the memory operation by the fraction of legalized
1623 // instructions that will actually be used. We shouldn't account for the
1624 // cost of dead instructions since they will be removed.
1625 //
1626 // E.g., An interleaved load of factor 8:
1627 // %vec = load <16 x i64>, <16 x i64>* %ptr
1628 // %v0 = shufflevector %vec, undef, <0, 8>
1629 //
1630 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1631 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1632 // type). The other loads are unused.
1633 //
1634 // TODO: Note that legalization can turn masked loads/stores into unmasked
1635 // (legalized) loads/stores. This can be reflected in the cost.
1636 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1637 // The number of loads of a legal type it will take to represent a load
1638 // of the unlegalized vector type.
1639 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1640
1641 // The number of elements of the unlegalized type that correspond to a
1642 // single legal instruction.
1643 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1644
1645 // Determine which legal instructions will be used.
1646 BitVector UsedInsts(NumLegalInsts, false);
1647 for (unsigned Index : Indices)
1648 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1649 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1650
1651 // Scale the cost of the load by the fraction of legal instructions that
1652 // will be used.
1653 Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
1654 }
1655
1656 // Then plus the cost of interleave operation.
1657 assert(Indices.size() <= Factor &&
1658 "Interleaved memory op has too many members");
1659
1660 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1661 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1662
1663 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1664 for (unsigned Index : Indices) {
1665 assert(Index < Factor && "Invalid index for interleaved memory op");
1666 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1667 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1668 }
1669
1670 if (Opcode == Instruction::Load) {
1671 // The interleave cost is similar to extract sub vectors' elements
1672 // from the wide vector, and insert them into sub vectors.
1673 //
1674 // E.g. An interleaved load of factor 2 (with one member of index 0):
1675 // %vec = load <8 x i32>, <8 x i32>* %ptr
1676 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1677 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1678 // <8 x i32> vector and insert them into a <4 x i32> vector.
1679 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1680 SubVT, DemandedAllSubElts,
1681 /*Insert*/ true, /*Extract*/ false, CostKind);
1682 Cost += Indices.size() * InsSubCost;
1683 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1684 /*Insert*/ false,
1685 /*Extract*/ true, CostKind);
1686 } else {
1687 // The interleave cost is extract elements from sub vectors, and
1688 // insert them into the wide vector.
1689 //
1690 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1691 // (using VF=4):
1692 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1693 // %gaps.mask = <true, true, false, true, true, false,
1694 // true, true, false, true, true, false>
1695 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1696 // i32 Align, <12 x i1> %gaps.mask
1697 // The cost is estimated as extract all elements (of actual members,
1698 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1699 // i32> vector.
1700 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1701 SubVT, DemandedAllSubElts,
1702 /*Insert*/ false, /*Extract*/ true, CostKind);
1703 Cost += ExtSubCost * Indices.size();
1704 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1705 /*Insert*/ true,
1706 /*Extract*/ false, CostKind);
1707 }
1708
1709 if (!UseMaskForCond)
1710 return Cost;
1711
1712 Type *I8Type = Type::getInt8Ty(VT->getContext());
1713
1714 Cost += thisT()->getReplicationShuffleCost(
1715 I8Type, Factor, NumSubElts,
1716 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1717 CostKind);
1718
1719 // The Gaps mask is invariant and created outside the loop, therefore the
1720 // cost of creating it is not accounted for here. However if we have both
1721 // a MaskForGaps and some other mask that guards the execution of the
1722 // memory access, we need to account for the cost of And-ing the two masks
1723 // inside the loop.
1724 if (UseMaskForGaps) {
1725 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1726 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1727 CostKind);
1728 }
1729
1730 return Cost;
1731 }
1732
1733 /// Get intrinsic cost based on arguments.
1736 TTI::TargetCostKind CostKind) const override {
1737 // Check for generically free intrinsics.
1739 return 0;
1740
1741 // Assume that target intrinsics are cheap.
1742 Intrinsic::ID IID = ICA.getID();
1745
1746 // VP Intrinsics should have the same cost as their non-vp counterpart.
1747 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1748 // counterpart when the vector length argument is smaller than the maximum
1749 // vector length.
1750 // TODO: Support other kinds of VPIntrinsics
1751 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1752 std::optional<unsigned> FOp =
1754 if (FOp) {
1755 if (ICA.getID() == Intrinsic::vp_load) {
1756 Align Alignment;
1757 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1758 Alignment = VPI->getPointerAlignment().valueOrOne();
1759 unsigned AS = 0;
1760 if (ICA.getArgTypes().size() > 1)
1761 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
1762 AS = PtrTy->getAddressSpace();
1763 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1764 AS, CostKind);
1765 }
1766 if (ICA.getID() == Intrinsic::vp_store) {
1767 Align Alignment;
1768 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1769 Alignment = VPI->getPointerAlignment().valueOrOne();
1770 unsigned AS = 0;
1771 if (ICA.getArgTypes().size() >= 2)
1772 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
1773 AS = PtrTy->getAddressSpace();
1774 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1775 AS, CostKind);
1776 }
1778 ICA.getID() == Intrinsic::vp_fneg) {
1779 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1780 CostKind);
1781 }
1782 if (VPCastIntrinsic::isVPCast(ICA.getID())) {
1783 return thisT()->getCastInstrCost(
1784 *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
1786 }
1787 if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
1788 // We can only handle vp_cmp intrinsics with underlying instructions.
1789 if (ICA.getInst()) {
1790 assert(FOp);
1791 auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
1792 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1793 ICA.getReturnType(),
1794 UI->getPredicate(), CostKind);
1795 }
1796 }
1797 }
1798
1799 if (ICA.getID() == Intrinsic::vp_scatter) {
1800 if (ICA.isTypeBasedOnly()) {
1801 IntrinsicCostAttributes MaskedScatter(
1804 ICA.getFlags());
1805 return getTypeBasedIntrinsicInstrCost(MaskedScatter, CostKind);
1806 }
1807 Align Alignment;
1808 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1809 Alignment = VPI->getPointerAlignment().valueOrOne();
1810 bool VarMask = isa<Constant>(ICA.getArgs()[2]);
1811 return thisT()->getGatherScatterOpCost(
1812 Instruction::Store, ICA.getArgTypes()[0], ICA.getArgs()[1], VarMask,
1813 Alignment, CostKind, nullptr);
1814 }
1815 if (ICA.getID() == Intrinsic::vp_gather) {
1816 if (ICA.isTypeBasedOnly()) {
1817 IntrinsicCostAttributes MaskedGather(
1820 ICA.getFlags());
1821 return getTypeBasedIntrinsicInstrCost(MaskedGather, CostKind);
1822 }
1823 Align Alignment;
1824 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1825 Alignment = VPI->getPointerAlignment().valueOrOne();
1826 bool VarMask = isa<Constant>(ICA.getArgs()[1]);
1827 return thisT()->getGatherScatterOpCost(
1828 Instruction::Load, ICA.getReturnType(), ICA.getArgs()[0], VarMask,
1829 Alignment, CostKind, nullptr);
1830 }
1831
1832 if (ICA.getID() == Intrinsic::vp_select ||
1833 ICA.getID() == Intrinsic::vp_merge) {
1834 TTI::OperandValueInfo OpInfoX, OpInfoY;
1835 if (!ICA.isTypeBasedOnly()) {
1836 OpInfoX = TTI::getOperandInfo(ICA.getArgs()[0]);
1837 OpInfoY = TTI::getOperandInfo(ICA.getArgs()[1]);
1838 }
1839 return getCmpSelInstrCost(
1840 Instruction::Select, ICA.getReturnType(), ICA.getArgTypes()[0],
1841 CmpInst::BAD_ICMP_PREDICATE, CostKind, OpInfoX, OpInfoY);
1842 }
1843
1844 std::optional<Intrinsic::ID> FID =
1846
1847 // Not functionally equivalent but close enough for cost modelling.
1848 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1849 FID = Intrinsic::vector_reverse;
1850
1851 if (FID) {
1852 // Non-vp version will have same arg types except mask and vector
1853 // length.
1854 assert(ICA.getArgTypes().size() >= 2 &&
1855 "Expected VPIntrinsic to have Mask and Vector Length args and "
1856 "types");
1857
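      // E.g. llvm.vp.reduce.fadd(%start, %v, %mask, %evl) is costed as
      // llvm.vector.reduce.fadd(%start, %v) once the mask and vector-length
      // operands are dropped below (example for illustration only).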
1858 ArrayRef<const Value *> NewArgs = ArrayRef(ICA.getArgs());
1859 if (!ICA.isTypeBasedOnly())
1860 NewArgs = NewArgs.drop_back(2);
1862
1863 // VPReduction intrinsics have a start value argument that their non-vp
1864 // counterparts do not have, except for the fadd and fmul non-vp
 1865 // counterparts.
1867 *FID != Intrinsic::vector_reduce_fadd &&
1868 *FID != Intrinsic::vector_reduce_fmul) {
1869 if (!ICA.isTypeBasedOnly())
1870 NewArgs = NewArgs.drop_front();
1871 NewTys = NewTys.drop_front();
1872 }
1873
1874 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewArgs,
1875 NewTys, ICA.getFlags());
1876 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1877 }
1878 }
1879
1880 if (ICA.isTypeBasedOnly())
1882
1883 Type *RetTy = ICA.getReturnType();
1884
1885 ElementCount RetVF = isVectorizedTy(RetTy) ? getVectorizedTypeVF(RetTy)
1887
1888 const IntrinsicInst *I = ICA.getInst();
1889 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1890 FastMathFlags FMF = ICA.getFlags();
1891 switch (IID) {
1892 default:
1893 break;
1894
1895 case Intrinsic::powi:
1896 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1897 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1898 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1899 ShouldOptForSize)) {
1900 // The cost is modeled on the expansion performed by ExpandPowI in
1901 // SelectionDAGBuilder.
1902 APInt Exponent = RHSC->getValue().abs();
1903 unsigned ActiveBits = Exponent.getActiveBits();
1904 unsigned PopCount = Exponent.popcount();
1905 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1906 thisT()->getArithmeticInstrCost(
1907 Instruction::FMul, RetTy, CostKind);
1908 if (RHSC->isNegative())
1909 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1910 CostKind);
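          // Illustration: powi(x, 11) has Exponent = 11 = 0b1011, so
          // ActiveBits = 4 and PopCount = 3, giving (4 + 3 - 2) = 5 fmuls for
          // the square-and-multiply chain, plus one fdiv when the exponent is
          // negative.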
1911 return Cost;
1912 }
1913 }
1914 break;
1915 case Intrinsic::cttz:
1916 // FIXME: If necessary, this should go in target-specific overrides.
1917 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1919 break;
1920
1921 case Intrinsic::ctlz:
1922 // FIXME: If necessary, this should go in target-specific overrides.
1923 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1925 break;
1926
1927 case Intrinsic::memcpy:
1928 return thisT()->getMemcpyCost(ICA.getInst());
1929
1930 case Intrinsic::masked_scatter: {
1931 const Value *Mask = Args[2];
1932 bool VarMask = !isa<Constant>(Mask);
1933 Align Alignment = I->getParamAlign(1).valueOrOne();
1934 return thisT()->getGatherScatterOpCost(Instruction::Store,
1935 ICA.getArgTypes()[0], Args[1],
1936 VarMask, Alignment, CostKind, I);
1937 }
1938 case Intrinsic::masked_gather: {
1939 const Value *Mask = Args[1];
1940 bool VarMask = !isa<Constant>(Mask);
1941 Align Alignment = I->getParamAlign(0).valueOrOne();
1942 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1943 VarMask, Alignment, CostKind, I);
1944 }
1945 case Intrinsic::masked_compressstore: {
1946 const Value *Data = Args[0];
1947 const Value *Mask = Args[2];
1948 Align Alignment = I->getParamAlign(1).valueOrOne();
1949 return thisT()->getExpandCompressMemoryOpCost(
1950 Instruction::Store, Data->getType(), !isa<Constant>(Mask), Alignment,
1951 CostKind, I);
1952 }
1953 case Intrinsic::masked_expandload: {
1954 const Value *Mask = Args[1];
1955 Align Alignment = I->getParamAlign(0).valueOrOne();
1956 return thisT()->getExpandCompressMemoryOpCost(Instruction::Load, RetTy,
1957 !isa<Constant>(Mask),
1958 Alignment, CostKind, I);
1959 }
1960 case Intrinsic::experimental_vp_strided_store: {
1961 const Value *Data = Args[0];
1962 const Value *Ptr = Args[1];
1963 const Value *Mask = Args[3];
1964 const Value *EVL = Args[4];
1965 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1966 Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
1967 Align Alignment =
1968 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1969 return thisT()->getStridedMemoryOpCost(Instruction::Store,
1970 Data->getType(), Ptr, VarMask,
1971 Alignment, CostKind, I);
1972 }
1973 case Intrinsic::experimental_vp_strided_load: {
1974 const Value *Ptr = Args[0];
1975 const Value *Mask = Args[2];
1976 const Value *EVL = Args[3];
1977 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1978 Type *EltTy = cast<VectorType>(RetTy)->getElementType();
1979 Align Alignment =
1980 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
1981 return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
1982 VarMask, Alignment, CostKind, I);
1983 }
1984 case Intrinsic::stepvector: {
1985 if (isa<ScalableVectorType>(RetTy))
1987 // The cost of materialising a constant integer vector.
1989 }
1990 case Intrinsic::vector_extract: {
1991 // FIXME: Handle case where a scalable vector is extracted from a scalable
1992 // vector
1993 if (isa<ScalableVectorType>(RetTy))
1995 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1996 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
1997 cast<VectorType>(RetTy),
1998 cast<VectorType>(Args[0]->getType()), {},
1999 CostKind, Index, cast<VectorType>(RetTy));
2000 }
2001 case Intrinsic::vector_insert: {
2002 // FIXME: Handle case where a scalable vector is inserted into a scalable
2003 // vector
2004 if (isa<ScalableVectorType>(Args[1]->getType()))
2006 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2007 return thisT()->getShuffleCost(
2009 cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
2010 cast<VectorType>(Args[1]->getType()));
2011 }
2012 case Intrinsic::vector_splice: {
2013 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2014 return thisT()->getShuffleCost(TTI::SK_Splice, cast<VectorType>(RetTy),
2015 cast<VectorType>(Args[0]->getType()), {},
2016 CostKind, Index, cast<VectorType>(RetTy));
2017 }
2018 case Intrinsic::vector_reduce_add:
2019 case Intrinsic::vector_reduce_mul:
2020 case Intrinsic::vector_reduce_and:
2021 case Intrinsic::vector_reduce_or:
2022 case Intrinsic::vector_reduce_xor:
2023 case Intrinsic::vector_reduce_smax:
2024 case Intrinsic::vector_reduce_smin:
2025 case Intrinsic::vector_reduce_fmax:
2026 case Intrinsic::vector_reduce_fmin:
2027 case Intrinsic::vector_reduce_fmaximum:
2028 case Intrinsic::vector_reduce_fminimum:
2029 case Intrinsic::vector_reduce_umax:
2030 case Intrinsic::vector_reduce_umin: {
2031 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
2033 }
2034 case Intrinsic::vector_reduce_fadd:
2035 case Intrinsic::vector_reduce_fmul: {
2037 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2039 }
2040 case Intrinsic::fshl:
2041 case Intrinsic::fshr: {
2042 const Value *X = Args[0];
2043 const Value *Y = Args[1];
2044 const Value *Z = Args[2];
2047 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
2048
2049 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2050 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
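      // For example, a variable-amount i32 rotate-left (fshl with X == Y) is
      // modelled roughly as the IR sequence below (no shift-by-zero select
      // needed; value names are illustrative only):
      //   %amt = and i32 %z, 31        ; Z % BW, BW is a power of two
      //   %inv = sub i32 32, %amt
      //   %hi  = shl i32 %x, %amt
      //   %lo  = lshr i32 %x, %inv
      //   %res = or i32 %hi, %lo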
2052 Cost +=
2053 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2054 Cost +=
2055 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2056 Cost += thisT()->getArithmeticInstrCost(
2057 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2058 {OpInfoZ.Kind, TTI::OP_None});
2059 Cost += thisT()->getArithmeticInstrCost(
2060 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2061 {OpInfoZ.Kind, TTI::OP_None});
 2062 // Non-constant shift amounts require a modulo. If the type size is a
 2063 // power of 2 then this will be converted to an and, otherwise it will
 2064 // use a urem.
2065 if (!OpInfoZ.isConstant())
2066 Cost += thisT()->getArithmeticInstrCost(
2067 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2068 : BinaryOperator::URem,
2069 RetTy, CostKind, OpInfoZ,
2070 {TTI::OK_UniformConstantValue, TTI::OP_None});
2071 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
2072 if (X != Y) {
2073 Type *CondTy = RetTy->getWithNewBitWidth(1);
2074 Cost +=
2075 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2077 Cost +=
2078 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2080 }
2081 return Cost;
2082 }
2083 case Intrinsic::experimental_cttz_elts: {
2084 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
2085
2086 // If we're not expanding the intrinsic then we assume this is cheap
2087 // to implement.
2088 if (!getTLI()->shouldExpandCttzElements(ArgType))
2089 return getTypeLegalizationCost(RetTy).first;
2090
2091 // TODO: The costs below reflect the expansion code in
2092 // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
2093 // favour of compile time.
2094
2095 // Find the smallest "sensible" element type to use for the expansion.
2096 bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
2097 ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
2098 if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
2099 VScaleRange = getVScaleRange(I->getCaller(), 64);
2100
2101 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2102 RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
2103 Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);
2104
2105 // Create the new vector type & get the vector length
2106 Type *NewVecTy = VectorType::get(
2107 NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
2108
2109 IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
2110 FMF);
2112 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2113
2114 Cost +=
2115 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2116 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2117 Args[0]->getType(),
2119 Cost +=
2120 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2121
2122 IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
2123 NewEltTy, NewVecTy, FMF, I, 1);
2124 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2125 Cost +=
2126 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2127
2128 return Cost;
2129 }
2130 case Intrinsic::get_active_lane_mask:
2131 case Intrinsic::experimental_vector_match:
2132 case Intrinsic::experimental_vector_histogram_add:
2133 case Intrinsic::experimental_vector_histogram_uadd_sat:
2134 case Intrinsic::experimental_vector_histogram_umax:
2135 case Intrinsic::experimental_vector_histogram_umin:
2136 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2137 case Intrinsic::modf:
2138 case Intrinsic::sincos:
2139 case Intrinsic::sincospi: {
2140 Type *Ty = getContainedTypes(RetTy).front();
2141 EVT VT = getTLI()->getValueType(DL, Ty);
2142
2143 RTLIB::Libcall LC = [&] {
2144 switch (ICA.getID()) {
2145 case Intrinsic::modf:
2146 return RTLIB::getMODF;
2147 case Intrinsic::sincos:
2148 return RTLIB::getSINCOS;
2149 case Intrinsic::sincospi:
2150 return RTLIB::getSINCOSPI;
2151 default:
2152 llvm_unreachable("unexpected intrinsic");
2153 }
2154 }()(VT.getScalarType());
2155
2156 std::optional<unsigned> CallRetElementIndex;
2157 // The first element of the modf result is returned by value in the
2158 // libcall.
2159 if (ICA.getID() == Intrinsic::modf)
2160 CallRetElementIndex = 0;
2161
2162 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2163 ICA, CostKind, LC, CallRetElementIndex))
2164 return *Cost;
 2165 // Otherwise, fall back to the default scalarization cost.
2166 break;
2167 }
2168 }
2169
 2170 // Assume that we need to scalarize this intrinsic.
2171 // Compute the scalarization overhead based on Args for a vector
2172 // intrinsic.
2173 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2174 if (RetVF.isVector() && !RetVF.isScalable()) {
2175 ScalarizationCost = 0;
2176 if (!RetTy->isVoidTy()) {
2177 for (Type *VectorTy : getContainedTypes(RetTy)) {
2178 ScalarizationCost += getScalarizationOverhead(
2179 cast<VectorType>(VectorTy),
2180 /*Insert=*/true, /*Extract=*/false, CostKind);
2181 }
2182 }
2183 ScalarizationCost += getOperandsScalarizationOverhead(
2184 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2185 CostKind);
2186 }
2187
2188 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
2189 ScalarizationCost);
2190 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2191 }
2192
2193 /// Get intrinsic cost based on argument types.
2194 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
2195 /// cost of scalarizing the arguments and the return value will be computed
2196 /// based on types.
2200 Intrinsic::ID IID = ICA.getID();
2201 Type *RetTy = ICA.getReturnType();
2202 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
2203 FastMathFlags FMF = ICA.getFlags();
2204 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
2205 bool SkipScalarizationCost = ICA.skipScalarizationCost();
2206
2207 VectorType *VecOpTy = nullptr;
2208 if (!Tys.empty()) {
2209 // The vector reduction operand is operand 0 except for fadd/fmul.
2210 // Their operand 0 is a scalar start value, so the vector op is operand 1.
2211 unsigned VecTyIndex = 0;
2212 if (IID == Intrinsic::vector_reduce_fadd ||
2213 IID == Intrinsic::vector_reduce_fmul)
2214 VecTyIndex = 1;
2215 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2216 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
2217 }
2218
2219 // Library call cost - other than size, make it expensive.
2220 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
2221 unsigned ISD = 0;
2222 switch (IID) {
2223 default: {
2224 // Scalable vectors cannot be scalarized, so return Invalid.
2225 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2226 return isa<ScalableVectorType>(Ty);
2227 }))
2229
2230 // Assume that we need to scalarize this intrinsic.
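      // E.g. a <4 x float> call to an intrinsic with no mapping below is
      // modelled as four scalar calls to the same intrinsic plus the cost of
      // extracting the operand lanes and inserting the four results.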
2231 InstructionCost ScalarizationCost =
2232 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2233 unsigned ScalarCalls = 1;
2234 Type *ScalarRetTy = RetTy;
2235 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2236 if (!SkipScalarizationCost)
2237 ScalarizationCost = getScalarizationOverhead(
2238 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
2239 ScalarCalls = std::max(ScalarCalls,
2241 ScalarRetTy = RetTy->getScalarType();
2242 }
2243 SmallVector<Type *, 4> ScalarTys;
2244 for (Type *Ty : Tys) {
2245 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2246 if (!SkipScalarizationCost)
2247 ScalarizationCost += getScalarizationOverhead(
2248 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2249 ScalarCalls = std::max(ScalarCalls,
2251 Ty = Ty->getScalarType();
2252 }
2253 ScalarTys.push_back(Ty);
2254 }
2255 if (ScalarCalls == 1)
2256 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
2257
2258 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
2259 InstructionCost ScalarCost =
2260 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2261
2262 return ScalarCalls * ScalarCost + ScalarizationCost;
2263 }
2264 // Look for intrinsics that can be lowered directly or turned into a scalar
2265 // intrinsic call.
2266 case Intrinsic::sqrt:
2267 ISD = ISD::FSQRT;
2268 break;
2269 case Intrinsic::sin:
2270 ISD = ISD::FSIN;
2271 break;
2272 case Intrinsic::cos:
2273 ISD = ISD::FCOS;
2274 break;
2275 case Intrinsic::sincos:
2276 ISD = ISD::FSINCOS;
2277 break;
2278 case Intrinsic::sincospi:
2279 ISD = ISD::FSINCOSPI;
2280 break;
2281 case Intrinsic::modf:
2282 ISD = ISD::FMODF;
2283 break;
2284 case Intrinsic::tan:
2285 ISD = ISD::FTAN;
2286 break;
2287 case Intrinsic::asin:
2288 ISD = ISD::FASIN;
2289 break;
2290 case Intrinsic::acos:
2291 ISD = ISD::FACOS;
2292 break;
2293 case Intrinsic::atan:
2294 ISD = ISD::FATAN;
2295 break;
2296 case Intrinsic::atan2:
2297 ISD = ISD::FATAN2;
2298 break;
2299 case Intrinsic::sinh:
2300 ISD = ISD::FSINH;
2301 break;
2302 case Intrinsic::cosh:
2303 ISD = ISD::FCOSH;
2304 break;
2305 case Intrinsic::tanh:
2306 ISD = ISD::FTANH;
2307 break;
2308 case Intrinsic::exp:
2309 ISD = ISD::FEXP;
2310 break;
2311 case Intrinsic::exp2:
2312 ISD = ISD::FEXP2;
2313 break;
2314 case Intrinsic::exp10:
2315 ISD = ISD::FEXP10;
2316 break;
2317 case Intrinsic::log:
2318 ISD = ISD::FLOG;
2319 break;
2320 case Intrinsic::log10:
2321 ISD = ISD::FLOG10;
2322 break;
2323 case Intrinsic::log2:
2324 ISD = ISD::FLOG2;
2325 break;
2326 case Intrinsic::ldexp:
2327 ISD = ISD::FLDEXP;
2328 break;
2329 case Intrinsic::fabs:
2330 ISD = ISD::FABS;
2331 break;
2332 case Intrinsic::canonicalize:
2334 break;
2335 case Intrinsic::minnum:
2336 ISD = ISD::FMINNUM;
2337 break;
2338 case Intrinsic::maxnum:
2339 ISD = ISD::FMAXNUM;
2340 break;
2341 case Intrinsic::minimum:
2342 ISD = ISD::FMINIMUM;
2343 break;
2344 case Intrinsic::maximum:
2345 ISD = ISD::FMAXIMUM;
2346 break;
2347 case Intrinsic::minimumnum:
2348 ISD = ISD::FMINIMUMNUM;
2349 break;
2350 case Intrinsic::maximumnum:
2351 ISD = ISD::FMAXIMUMNUM;
2352 break;
2353 case Intrinsic::copysign:
2355 break;
2356 case Intrinsic::floor:
2357 ISD = ISD::FFLOOR;
2358 break;
2359 case Intrinsic::ceil:
2360 ISD = ISD::FCEIL;
2361 break;
2362 case Intrinsic::trunc:
2363 ISD = ISD::FTRUNC;
2364 break;
2365 case Intrinsic::nearbyint:
2366 ISD = ISD::FNEARBYINT;
2367 break;
2368 case Intrinsic::rint:
2369 ISD = ISD::FRINT;
2370 break;
2371 case Intrinsic::lrint:
2372 ISD = ISD::LRINT;
2373 break;
2374 case Intrinsic::llrint:
2375 ISD = ISD::LLRINT;
2376 break;
2377 case Intrinsic::round:
2378 ISD = ISD::FROUND;
2379 break;
2380 case Intrinsic::roundeven:
2381 ISD = ISD::FROUNDEVEN;
2382 break;
2383 case Intrinsic::lround:
2384 ISD = ISD::LROUND;
2385 break;
2386 case Intrinsic::llround:
2387 ISD = ISD::LLROUND;
2388 break;
2389 case Intrinsic::pow:
2390 ISD = ISD::FPOW;
2391 break;
2392 case Intrinsic::fma:
2393 ISD = ISD::FMA;
2394 break;
2395 case Intrinsic::fmuladd:
2396 ISD = ISD::FMA;
2397 break;
2398 case Intrinsic::experimental_constrained_fmuladd:
2400 break;
2401 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2402 case Intrinsic::lifetime_start:
2403 case Intrinsic::lifetime_end:
2404 case Intrinsic::sideeffect:
2405 case Intrinsic::pseudoprobe:
2406 case Intrinsic::arithmetic_fence:
2407 return 0;
2408 case Intrinsic::masked_store: {
2409 Type *Ty = Tys[0];
2410 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2411 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
2412 CostKind);
2413 }
2414 case Intrinsic::masked_load: {
2415 Type *Ty = RetTy;
2416 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2417 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
2418 CostKind);
2419 }
2420 case Intrinsic::experimental_vp_strided_store: {
2421 auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
2422 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2423 return thisT()->getStridedMemoryOpCost(
2424 Instruction::Store, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true,
2425 Alignment, CostKind, ICA.getInst());
2426 }
2427 case Intrinsic::experimental_vp_strided_load: {
2428 auto *Ty = cast<VectorType>(ICA.getReturnType());
2429 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2430 return thisT()->getStridedMemoryOpCost(
2431 Instruction::Load, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true,
2432 Alignment, CostKind, ICA.getInst());
2433 }
2434 case Intrinsic::vector_reduce_add:
2435 case Intrinsic::vector_reduce_mul:
2436 case Intrinsic::vector_reduce_and:
2437 case Intrinsic::vector_reduce_or:
2438 case Intrinsic::vector_reduce_xor:
2439 return thisT()->getArithmeticReductionCost(
2440 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2441 CostKind);
2442 case Intrinsic::vector_reduce_fadd:
2443 case Intrinsic::vector_reduce_fmul:
2444 return thisT()->getArithmeticReductionCost(
2445 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2446 case Intrinsic::vector_reduce_smax:
2447 case Intrinsic::vector_reduce_smin:
2448 case Intrinsic::vector_reduce_umax:
2449 case Intrinsic::vector_reduce_umin:
2450 case Intrinsic::vector_reduce_fmax:
2451 case Intrinsic::vector_reduce_fmin:
2452 case Intrinsic::vector_reduce_fmaximum:
2453 case Intrinsic::vector_reduce_fminimum:
2454 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2455 VecOpTy, ICA.getFlags(), CostKind);
2456 case Intrinsic::experimental_vector_match: {
2457 auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
2458 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
2459 unsigned SearchSize = NeedleTy->getNumElements();
2460
2461 // If we're not expanding the intrinsic then we assume this is cheap to
2462 // implement.
2463 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2464 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2465 return getTypeLegalizationCost(RetTy).first;
2466
2467 // Approximate the cost based on the expansion code in
2468 // SelectionDAGBuilder.
2470 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2471 CostKind, 1, nullptr, nullptr);
2472 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2473 CostKind, 0, nullptr, nullptr);
2474 Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, SearchTy, {},
2475 CostKind, 0, nullptr);
2476 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2478 Cost +=
2479 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2480 Cost *= SearchSize;
2481 Cost +=
2482 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2483 return Cost;
2484 }
2485 case Intrinsic::vector_reverse:
2486 return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
2487 cast<VectorType>(ICA.getArgTypes()[0]), {},
2488 CostKind, 0, cast<VectorType>(RetTy));
2489 case Intrinsic::experimental_vector_histogram_add:
2490 case Intrinsic::experimental_vector_histogram_uadd_sat:
2491 case Intrinsic::experimental_vector_histogram_umax:
2492 case Intrinsic::experimental_vector_histogram_umin: {
2494 Type *EltTy = ICA.getArgTypes()[1];
2495
2496 // Targets with scalable vectors must handle this on their own.
2497 if (!PtrsTy)
2499
2500 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2502 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2503 CostKind, 1, nullptr, nullptr);
2504 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2505 CostKind);
2506 switch (IID) {
2507 default:
2508 llvm_unreachable("Unhandled histogram update operation.");
2509 case Intrinsic::experimental_vector_histogram_add:
2510 Cost +=
2511 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2512 break;
2513 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2514 IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
2515 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2516 break;
2517 }
2518 case Intrinsic::experimental_vector_histogram_umax: {
2519 IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
2520 Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
2521 break;
2522 }
2523 case Intrinsic::experimental_vector_histogram_umin: {
2524 IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
2525 Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
2526 break;
2527 }
2528 }
2529 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2530 CostKind);
2531 Cost *= PtrsTy->getNumElements();
2532 return Cost;
2533 }
2534 case Intrinsic::get_active_lane_mask: {
2535 Type *ArgTy = ICA.getArgTypes()[0];
2536 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2537 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2538
2539 // If we're not expanding the intrinsic then we assume this is cheap
2540 // to implement.
2541 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2542 return getTypeLegalizationCost(RetTy).first;
2543
2544 // Create the expanded types that will be used to calculate the uadd_sat
2545 // operation.
2546 Type *ExpRetTy =
2547 VectorType::get(ArgTy, cast<VectorType>(RetTy)->getElementCount());
2548 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
2550 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2551 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2553 return Cost;
2554 }
2555 case Intrinsic::experimental_memset_pattern:
2556 // This cost is set to match the cost of the memset_pattern16 libcall.
2557 // It should likely be re-evaluated after migration to this intrinsic
2558 // is complete.
2559 return TTI::TCC_Basic * 4;
2560 case Intrinsic::abs:
2561 ISD = ISD::ABS;
2562 break;
2563 case Intrinsic::fshl:
2564 ISD = ISD::FSHL;
2565 break;
2566 case Intrinsic::fshr:
2567 ISD = ISD::FSHR;
2568 break;
2569 case Intrinsic::smax:
2570 ISD = ISD::SMAX;
2571 break;
2572 case Intrinsic::smin:
2573 ISD = ISD::SMIN;
2574 break;
2575 case Intrinsic::umax:
2576 ISD = ISD::UMAX;
2577 break;
2578 case Intrinsic::umin:
2579 ISD = ISD::UMIN;
2580 break;
2581 case Intrinsic::sadd_sat:
2582 ISD = ISD::SADDSAT;
2583 break;
2584 case Intrinsic::ssub_sat:
2585 ISD = ISD::SSUBSAT;
2586 break;
2587 case Intrinsic::uadd_sat:
2588 ISD = ISD::UADDSAT;
2589 break;
2590 case Intrinsic::usub_sat:
2591 ISD = ISD::USUBSAT;
2592 break;
2593 case Intrinsic::smul_fix:
2594 ISD = ISD::SMULFIX;
2595 break;
2596 case Intrinsic::umul_fix:
2597 ISD = ISD::UMULFIX;
2598 break;
2599 case Intrinsic::sadd_with_overflow:
2600 ISD = ISD::SADDO;
2601 break;
2602 case Intrinsic::ssub_with_overflow:
2603 ISD = ISD::SSUBO;
2604 break;
2605 case Intrinsic::uadd_with_overflow:
2606 ISD = ISD::UADDO;
2607 break;
2608 case Intrinsic::usub_with_overflow:
2609 ISD = ISD::USUBO;
2610 break;
2611 case Intrinsic::smul_with_overflow:
2612 ISD = ISD::SMULO;
2613 break;
2614 case Intrinsic::umul_with_overflow:
2615 ISD = ISD::UMULO;
2616 break;
2617 case Intrinsic::fptosi_sat:
2618 case Intrinsic::fptoui_sat: {
2619 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
2620 std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
2621
2622 // For cast instructions, types are different between source and
 2623 // destination. We also need to check whether the source type can be legalized.
2624 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2626 ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
2628 break;
2629 }
2630 case Intrinsic::ctpop:
2631 ISD = ISD::CTPOP;
2632 // In case of legalization use TCC_Expensive. This is cheaper than a
2633 // library call but still not a cheap instruction.
2634 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2635 break;
2636 case Intrinsic::ctlz:
2637 ISD = ISD::CTLZ;
2638 break;
2639 case Intrinsic::cttz:
2640 ISD = ISD::CTTZ;
2641 break;
2642 case Intrinsic::bswap:
2643 ISD = ISD::BSWAP;
2644 break;
2645 case Intrinsic::bitreverse:
2647 break;
2648 case Intrinsic::ucmp:
2649 ISD = ISD::UCMP;
2650 break;
2651 case Intrinsic::scmp:
2652 ISD = ISD::SCMP;
2653 break;
2654 }
2655
2656 auto *ST = dyn_cast<StructType>(RetTy);
2657 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2658 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);
2659
2660 const TargetLoweringBase *TLI = getTLI();
2661
2662 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2663 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2664 TLI->isFAbsFree(LT.second)) {
2665 return 0;
2666 }
2667
2668 // The operation is legal. Assume it costs 1.
2669 // If the type is split to multiple registers, assume that there is some
2670 // overhead to this.
2671 // TODO: Once we have extract/insert subvector cost we need to use them.
2672 if (LT.first > 1)
2673 return (LT.first * 2);
2674 else
2675 return (LT.first * 1);
2676 } else if (TLI->isOperationCustom(ISD, LT.second)) {
2677 // If the operation is custom lowered then assume
2678 // that the code is twice as expensive.
2679 return (LT.first * 2);
2680 }
2681
2682 switch (IID) {
2683 case Intrinsic::fmuladd: {
 2684 // If we can't lower fmuladd into an FMA, estimate the cost as a floating
2685 // point mul followed by an add.
2686
2687 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2688 CostKind) +
2689 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2690 CostKind);
2691 }
2692 case Intrinsic::experimental_constrained_fmuladd: {
2693 IntrinsicCostAttributes FMulAttrs(
2694 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2695 IntrinsicCostAttributes FAddAttrs(
2696 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2697 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2698 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2699 }
2700 case Intrinsic::smin:
2701 case Intrinsic::smax:
2702 case Intrinsic::umin:
2703 case Intrinsic::umax: {
2704 // minmax(X,Y) = select(icmp(X,Y),X,Y)
2705 Type *CondTy = RetTy->getWithNewBitWidth(1);
2706 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2707 CmpInst::Predicate Pred =
2708 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2710 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2711 Pred, CostKind);
2712 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2713 Pred, CostKind);
2714 return Cost;
2715 }
2716 case Intrinsic::sadd_with_overflow:
2717 case Intrinsic::ssub_with_overflow: {
2718 Type *SumTy = RetTy->getContainedType(0);
2719 Type *OverflowTy = RetTy->getContainedType(1);
2720 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2721 ? BinaryOperator::Add
2722 : BinaryOperator::Sub;
2723
2724 // Add:
2725 // Overflow -> (Result < LHS) ^ (RHS < 0)
2726 // Sub:
2727 // Overflow -> (Result < LHS) ^ (RHS > 0)
2729 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2730 Cost +=
2731 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2733 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2734 CostKind);
2735 return Cost;
2736 }
2737 case Intrinsic::uadd_with_overflow:
2738 case Intrinsic::usub_with_overflow: {
2739 Type *SumTy = RetTy->getContainedType(0);
2740 Type *OverflowTy = RetTy->getContainedType(1);
2741 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2742 ? BinaryOperator::Add
2743 : BinaryOperator::Sub;
2744 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2747
2749 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2750 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2751 OverflowTy, Pred, CostKind);
2752 return Cost;
2753 }
2754 case Intrinsic::smul_with_overflow:
2755 case Intrinsic::umul_with_overflow: {
2756 Type *MulTy = RetTy->getContainedType(0);
2757 Type *OverflowTy = RetTy->getContainedType(1);
2758 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2759 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2760 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2761
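      // Rough i32 sketch of the modelled expansion for smul.with.overflow
      // (value names are illustrative only):
      //   %a64 = sext i32 %a to i64
      //   %b64 = sext i32 %b to i64
      //   %w   = mul i64 %a64, %b64
      //   %lo  = trunc i64 %w to i32
      //   %hiw = lshr i64 %w, 32
      //   %hi  = trunc i64 %hiw to i32
      //   %sgn = ashr i32 %lo, 31
      //   %ovf = icmp ne i32 %hi, %sgn
      // The unsigned form skips the ashr and compares %hi against zero.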
2762 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2764
2766 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2767 Cost +=
2768 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2769 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2770 CCH, CostKind);
2771 Cost += thisT()->getArithmeticInstrCost(
2772 Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2774
2775 if (IsSigned)
2776 Cost += thisT()->getArithmeticInstrCost(
2777 Instruction::AShr, MulTy, CostKind,
2780
2781 Cost += thisT()->getCmpSelInstrCost(
2782 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2783 return Cost;
2784 }
2785 case Intrinsic::sadd_sat:
2786 case Intrinsic::ssub_sat: {
2787 // Assume a default expansion.
2788 Type *CondTy = RetTy->getWithNewBitWidth(1);
2789
2790 Type *OpTy = StructType::create({RetTy, CondTy});
2791 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2792 ? Intrinsic::sadd_with_overflow
2793 : Intrinsic::ssub_with_overflow;
2795
2796 // SatMax -> Overflow && SumDiff < 0
2797 // SatMin -> Overflow && SumDiff >= 0
2799 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2800 nullptr, ScalarizationCostPassed);
2801 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2802 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2803 Pred, CostKind);
2804 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2805 CondTy, Pred, CostKind);
2806 return Cost;
2807 }
2808 case Intrinsic::uadd_sat:
2809 case Intrinsic::usub_sat: {
2810 Type *CondTy = RetTy->getWithNewBitWidth(1);
2811
2812 Type *OpTy = StructType::create({RetTy, CondTy});
2813 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2814 ? Intrinsic::uadd_with_overflow
2815 : Intrinsic::usub_with_overflow;
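      // E.g. uadd.sat(%a, %b) is modelled as uadd.with.overflow(%a, %b)
      // followed by a single select that yields all-ones on overflow and the
      // wrapped sum otherwise (usub.sat selects zero instead).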
2816
2818 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2819 nullptr, ScalarizationCostPassed);
2820 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2821 Cost +=
2822 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2824 return Cost;
2825 }
2826 case Intrinsic::smul_fix:
2827 case Intrinsic::umul_fix: {
2828 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2829 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2830
2831 unsigned ExtOp =
2832 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2834
2836 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2837 Cost +=
2838 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2839 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2840 CCH, CostKind);
2841 Cost += thisT()->getArithmeticInstrCost(
2842 Instruction::LShr, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2844 Cost += thisT()->getArithmeticInstrCost(
2845 Instruction::Shl, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2847 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2848 return Cost;
2849 }
2850 case Intrinsic::abs: {
2851 // abs(X) = select(icmp(X,0),X,sub(0,X))
2852 Type *CondTy = RetTy->getWithNewBitWidth(1);
2855 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2856 Pred, CostKind);
2857 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2858 Pred, CostKind);
2859 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2860 Cost += thisT()->getArithmeticInstrCost(
2861 BinaryOperator::Sub, RetTy, CostKind,
2863 return Cost;
2864 }
2865 case Intrinsic::fshl:
2866 case Intrinsic::fshr: {
2867 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2868 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2869 Type *CondTy = RetTy->getWithNewBitWidth(1);
2871 Cost +=
2872 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2873 Cost +=
2874 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2875 Cost +=
2876 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2877 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2878 CostKind);
 2879 // Non-constant shift amounts require a modulo. If the type size is a
 2880 // power of 2 then this will be converted to an and, otherwise it will
 2881 // use a urem.
2882 Cost += thisT()->getArithmeticInstrCost(
2883 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2884 : BinaryOperator::URem,
2885 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2886 {TTI::OK_UniformConstantValue, TTI::OP_None});
2887 // Shift-by-zero handling.
2888 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2890 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2892 return Cost;
2893 }
2894 case Intrinsic::fptosi_sat:
2895 case Intrinsic::fptoui_sat: {
2896 if (Tys.empty())
2897 break;
2898 Type *FromTy = Tys[0];
2899 bool IsSigned = IID == Intrinsic::fptosi_sat;
2900
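      // Roughly, a signed saturating i32 conversion is modelled as the
      // sequence below (clamping bounds shown symbolically):
      //   %lo  = call float @llvm.maxnum.f32(float %x, float %int_min)
      //   %hi  = call float @llvm.minnum.f32(float %lo, float %int_max)
      //   %cvt = fptosi float %hi to i32
      //   %nan = fcmp uno float %x, %x
      //   %res = select i1 %nan, i32 0, i32 %cvt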
2902 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2903 {FromTy, FromTy});
2904 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2905 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2906 {FromTy, FromTy});
2907 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2908 Cost += thisT()->getCastInstrCost(
2909 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2911 if (IsSigned) {
2912 Type *CondTy = RetTy->getWithNewBitWidth(1);
2913 Cost += thisT()->getCmpSelInstrCost(
2914 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2915 Cost += thisT()->getCmpSelInstrCost(
2916 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2917 }
2918 return Cost;
2919 }
2920 case Intrinsic::ucmp:
2921 case Intrinsic::scmp: {
2922 Type *CmpTy = Tys[0];
2923 Type *CondTy = RetTy->getWithNewBitWidth(1);
2925 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2927 CostKind) +
2928 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2930 CostKind);
2931
2932 EVT VT = TLI->getValueType(DL, CmpTy, true);
2934 // x < y ? -1 : (x > y ? 1 : 0)
2935 Cost += 2 * thisT()->getCmpSelInstrCost(
2936 BinaryOperator::Select, RetTy, CondTy,
2938 } else {
2939 // zext(x > y) - zext(x < y)
2940 Cost +=
2941 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2943 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2944 CostKind);
2945 }
2946 return Cost;
2947 }
2948 case Intrinsic::maximumnum:
2949 case Intrinsic::minimumnum: {
 2950 // On platforms that support FMAXNUM_IEEE/FMINNUM_IEEE, we expand
2951 // maximumnum/minimumnum to
2952 // ARG0 = fcanonicalize ARG0, ARG0 // to quiet ARG0
2953 // ARG1 = fcanonicalize ARG1, ARG1 // to quiet ARG1
2954 // RESULT = MAXNUM_IEEE ARG0, ARG1 // or MINNUM_IEEE
 2955 // FIXME: In LangRef, we claimed FMAXNUM has the same behaviour as
 2956 // FMAXNUM_IEEE, but the backend has not migrated that code yet.
 2957 // Eventually, we will remove FMAXNUM_IEEE and FMINNUM_IEEE.
2958 int IeeeISD =
2959 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
2960 if (TLI->isOperationLegal(IeeeISD, LT.second)) {
2961 IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
2962 RetTy, Tys[0]);
2963 InstructionCost FCanonicalizeCost =
2964 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
2965 return LT.first + FCanonicalizeCost * 2;
2966 }
2967 break;
2968 }
2969 default:
2970 break;
2971 }
2972
2973 // Else, assume that we need to scalarize this intrinsic. For math builtins
2974 // this will emit a costly libcall, adding call overhead and spills. Make it
2975 // very expensive.
2976 if (isVectorizedTy(RetTy)) {
2977 ArrayRef<Type *> RetVTys = getContainedTypes(RetTy);
2978
2979 // Scalable vectors cannot be scalarized, so return Invalid.
2980 if (any_of(concat<Type *const>(RetVTys, Tys),
2981 [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
2983
2984 InstructionCost ScalarizationCost = ScalarizationCostPassed;
2985 if (!SkipScalarizationCost) {
2986 ScalarizationCost = 0;
2987 for (Type *RetVTy : RetVTys) {
2988 ScalarizationCost += getScalarizationOverhead(
2989 cast<VectorType>(RetVTy), /*Insert=*/true,
2990 /*Extract=*/false, CostKind);
2991 }
2992 }
2993
2994 unsigned ScalarCalls = getVectorizedTypeVF(RetTy).getFixedValue();
2995 SmallVector<Type *, 4> ScalarTys;
2996 for (Type *Ty : Tys) {
2997 if (Ty->isVectorTy())
2998 Ty = Ty->getScalarType();
2999 ScalarTys.push_back(Ty);
3000 }
3001 IntrinsicCostAttributes Attrs(IID, toScalarizedTy(RetTy), ScalarTys, FMF);
3002 InstructionCost ScalarCost =
3003 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
3004 for (Type *Ty : Tys) {
3005 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
3006 if (!ICA.skipScalarizationCost())
3007 ScalarizationCost += getScalarizationOverhead(
3008 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
3009 ScalarCalls = std::max(ScalarCalls,
3011 }
3012 }
3013 return ScalarCalls * ScalarCost + ScalarizationCost;
3014 }
3015
3016 // This is going to be turned into a library call, make it expensive.
3017 return SingleCallCost;
3018 }
3019
3020 /// Compute a cost of the given call instruction.
3021 ///
3022 /// Compute the cost of calling function F with return type RetTy and
3023 /// argument types Tys. F might be nullptr, in this case the cost of an
3024 /// arbitrary call with the specified signature will be returned.
3025 /// This is used, for instance, when we estimate call of a vector
3026 /// counterpart of the given function.
3027 /// \param F Called function, might be nullptr.
3028 /// \param RetTy Return value types.
3029 /// \param Tys Argument types.
3030 /// \returns The cost of Call instruction.
3033 TTI::TargetCostKind CostKind) const override {
3034 return 10;
3035 }
3036
3037 unsigned getNumberOfParts(Type *Tp) const override {
3038 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
3039 if (!LT.first.isValid())
3040 return 0;
3041 // Try to find actual number of parts for non-power-of-2 elements as
3042 // ceil(num-of-elements/num-of-subtype-elements).
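    // For example, on a hypothetical target whose widest legal type here is
    // <4 x i32>, a <12 x i32> vector is counted as ceil(12 / 4) = 3 parts,
    // even though legalization first widens it to a power of two.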
3043 if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
3044 Tp && LT.second.isFixedLengthVector() &&
3045 !has_single_bit(FTp->getNumElements())) {
3046 if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
3047 EVT(LT.second).getTypeForEVT(Tp->getContext()));
3048 SubTp && SubTp->getElementType() == FTp->getElementType())
3049 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3050 }
3051 return LT.first.getValue();
3052 }
3053
3056 TTI::TargetCostKind) const override {
3057 return 0;
3058 }
3059
3060 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
 3061 /// We're assuming that reduction operations are performed in the following way:
3062 ///
3063 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
3064 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
3065 /// \----------------v-------------/ \----------v------------/
3066 /// n/2 elements n/2 elements
3067 /// %red1 = op <n x t> %val, <n x t> val1
3068 /// After this operation we have a vector %red1 where only the first n/2
3069 /// elements are meaningful, the second n/2 elements are undefined and can be
3070 /// dropped. All other operations are actually working with the vector of
3071 /// length n/2, not n, though the real vector length is still n.
3072 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
3073 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
3074 /// \----------------v-------------/ \----------v------------/
3075 /// n/4 elements 3*n/4 elements
3076 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
3077 /// length n/2, the resulting vector has length n/4 etc.
3078 ///
3079 /// The cost model should take into account that the actual length of the
3080 /// vector is reduced on each iteration.
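  /// As an illustration (assuming <4 x i32> is the widest legal type): an add
  /// reduction of <16 x i32> is costed as two extract-subvector shuffles plus
  /// adds while halving down to <4 x i32>, then log2(4) = 2 more shuffle+add
  /// levels on <4 x i32>, and a final extractelement of lane 0.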
3083 // Targets must implement a default value for the scalable case, since
3084 // we don't know how many lanes the vector has.
3087
3088 Type *ScalarTy = Ty->getElementType();
3089 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3090 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3091 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
3092 NumVecElts >= 2) {
3093 // Or reduction for i1 is represented as:
3094 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3095 // %res = cmp ne iReduxWidth %val, 0
3096 // And reduction for i1 is represented as:
3097 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3098 // %res = cmp eq iReduxWidth %val, 11111
3099 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
3100 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3102 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3105 }
3106 unsigned NumReduxLevels = Log2_32(NumVecElts);
3107 InstructionCost ArithCost = 0;
3108 InstructionCost ShuffleCost = 0;
3109 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3110 unsigned LongVectorCount = 0;
3111 unsigned MVTLen =
3112 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3113 while (NumVecElts > MVTLen) {
3114 NumVecElts /= 2;
3115 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3116 ShuffleCost += thisT()->getShuffleCost(
3117 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3118 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3119 Ty = SubTy;
3120 ++LongVectorCount;
3121 }
3122
3123 NumReduxLevels -= LongVectorCount;
3124
3125 // The minimal length of the vector is limited by the real length of vector
3126 // operations performed on the current platform. That's why several final
3127 // reduction operations are performed on the vectors with the same
3128 // architecture-dependent length.
3129
3130 // By default reductions need one shuffle per reduction level.
3131 ShuffleCost +=
3132 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3133 Ty, {}, CostKind, 0, Ty);
3134 ArithCost +=
3135 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3136 return ShuffleCost + ArithCost +
3137 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3138 CostKind, 0, nullptr, nullptr);
3139 }
3140
3141 /// Try to calculate the cost of performing strict (in-order) reductions,
3142 /// which involves doing a sequence of floating point additions in lane
3143 /// order, starting with an initial value. For example, consider a scalar
3144 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
3145 ///
3146 /// Vector = <float %v0, float %v1, float %v2, float %v3>
3147 ///
3148 /// %add1 = %InitVal + %v0
3149 /// %add2 = %add1 + %v1
3150 /// %add3 = %add2 + %v2
3151 /// %add4 = %add3 + %v3
3152 ///
3153 /// As a simple estimate we can say the cost of such a reduction is 4 times
3154 /// the cost of a scalar FP addition. We can only estimate the costs for
3155 /// fixed-width vectors here because for scalable vectors we do not know the
3156 /// runtime number of operations.
3159 // Targets must implement a default value for the scalable case, since
3160 // we don't know how many lanes the vector has.
3163
3164 auto *VTy = cast<FixedVectorType>(Ty);
3166 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
3167 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
3168 Opcode, VTy->getElementType(), CostKind);
3169 ArithCost *= VTy->getNumElements();
3170
3171 return ExtractCost + ArithCost;
3172 }
3173
3176 std::optional<FastMathFlags> FMF,
3177 TTI::TargetCostKind CostKind) const override {
3178 assert(Ty && "Unknown reduction vector type");
3180 return getOrderedReductionCost(Opcode, Ty, CostKind);
3181 return getTreeReductionCost(Opcode, Ty, CostKind);
3182 }
3183
3184 /// Try to calculate op costs for min/max reduction operations.
3185 /// \param CondTy Conditional type for the Select instruction.
3188 TTI::TargetCostKind CostKind) const override {
3189 // Targets must implement a default value for the scalable case, since
3190 // we don't know how many lanes the vector has.
3193
3194 Type *ScalarTy = Ty->getElementType();
3195 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3196 unsigned NumReduxLevels = Log2_32(NumVecElts);
3197 InstructionCost MinMaxCost = 0;
3198 InstructionCost ShuffleCost = 0;
3199 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3200 unsigned LongVectorCount = 0;
3201 unsigned MVTLen =
3202 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3203 while (NumVecElts > MVTLen) {
3204 NumVecElts /= 2;
3205 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3206
3207 ShuffleCost += thisT()->getShuffleCost(
3208 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3209
3210 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
3211 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
3212 Ty = SubTy;
3213 ++LongVectorCount;
3214 }
3215
3216 NumReduxLevels -= LongVectorCount;
3217
3218 // The minimal length of the vector is limited by the real length of vector
3219 // operations performed on the current platform. That's why several final
 3220 // reduction operations are performed on the vectors with the same
3221 // architecture-dependent length.
3222 ShuffleCost +=
3223 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3224 Ty, {}, CostKind, 0, Ty);
3225 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
3226 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
3227 // The last min/max should be in vector registers and we counted it above.
 3228 // So we only need a single extractelement.
3229 return ShuffleCost + MinMaxCost +
3230 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3231 CostKind, 0, nullptr, nullptr);
3232 }
3233
3235 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
3236 VectorType *Ty, std::optional<FastMathFlags> FMF,
3237 TTI::TargetCostKind CostKind) const override {
3238 if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
3239 FTy && IsUnsigned && Opcode == Instruction::Add &&
3240 FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
3241 // Represent vector_reduce_add(ZExt(<n x i1>)) as
3242 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
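      // E.g. vector_reduce_add(zext <8 x i1> %m to <8 x i32>) is costed as a
      // bitcast of %m to i8 plus llvm.ctpop.i8; the final zext/trunc to the
      // result type is not costed separately here.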
3243 auto *IntTy =
3244 IntegerType::get(ResTy->getContext(), FTy->getNumElements());
3245 IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy},
3246 FMF ? *FMF : FastMathFlags());
3247 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3249 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3250 }
3251 // Without any native support, this is equivalent to the cost of
3252 // vecreduce.opcode(ext(Ty A)).
3253 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3254 InstructionCost RedCost =
3255 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3256 InstructionCost ExtCost = thisT()->getCastInstrCost(
3257 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3259
3260 return RedCost + ExtCost;
3261 }
3262
3264 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
3265 VectorType *Ty,
3266 TTI::TargetCostKind CostKind) const override {
3267 // Without any native support, this is equivalent to the cost of
3268 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
3269 // vecreduce.add(mul(A, B)).
3270 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3271 "The reduction opcode is expected to be Add or Sub.");
3272 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3273 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
3274 RedOpcode, ExtTy, std::nullopt, CostKind);
3275 InstructionCost ExtCost = thisT()->getCastInstrCost(
3276 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3278
3279 InstructionCost MulCost =
3280 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3281
3282 return RedCost + MulCost + 2 * ExtCost;
3283 }
3284
3286
3287 /// @}
3288};
3289
3290/// Concrete BasicTTIImpl that can be used if no further customization
3291/// is needed.
3292class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
3293 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
3294
3295 friend class BasicTTIImplBase<BasicTTIImpl>;
3296
3297 const TargetSubtargetInfo *ST;
3298 const TargetLoweringBase *TLI;
3299
3300 const TargetSubtargetInfo *getST() const { return ST; }
3301 const TargetLoweringBase *getTLI() const { return TLI; }
3302
3303public:
3304 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
3305};
3306
3307} // end namespace llvm
3308
3309#endif // LLVM_CODEGEN_BASICTTIIMPL_H
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool shouldBuildLookupTables() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override
Estimate the overhead of scalarizing an instruction.
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isProfitableToHoist(Instruction *I) const override
unsigned getNumberOfParts(Type *Tp) const override
unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const override
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
bool useAA() const override
unsigned getPrefetchDistance() const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const override
bool isLegalAddScalableImmediate(int64_t Imm) const override
unsigned getAssumedAddrSpace(const Value *V) const override
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override
bool haveFastSqrt(Type *Ty) const override
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const override
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override
unsigned adjustInliningThreshold(const CallBase *CB) const override
unsigned getInliningThresholdMultiplier() const override
InstructionCost getExpandCompressMemoryOpCost(unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
bool shouldBuildRelLookupTables() const override
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
unsigned getEpilogueVectorizationMinVF() const override
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
InstructionCost getVectorSplitCost() const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
std::optional< unsigned > getMaxVScale() const override
unsigned getFlatAddressSpace() const override
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Compute a cost of the given call instruction.
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
~BasicTTIImplBase() override=default
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
unsigned getMaxPrefetchIterationsAhead() const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Get intrinsic cost based on argument types.
bool hasBranchDivergence(const Function *F=nullptr) const override
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override
bool shouldPrefetchAddressSpace(unsigned AS) const override
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override
unsigned getCacheLineSize() const override
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override
bool shouldDropLSRSolutionIfLessProfitable() const override
int getInlinerVectorBonusPercent() const override
bool isVScaleKnownToBeAPowerOfTwo() const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
bool isLegalAddImmediate(int64_t imm) const override
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
bool isSingleThreaded() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
bool isProfitableLSRChainElement(Instruction *I) const override
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const override
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const override
std::optional< unsigned > getVScaleForTuning() const override
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const override
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override
bool isSourceOfDivergence(const Value *V) const override
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const override
bool isAlwaysUniform(const Value *V) const override
bool isLegalICmpImmediate(int64_t imm) const override
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override
unsigned getRegUsageForType(Type *Ty) const override
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
bool isTypeLegal(Type *Ty) const override
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) const override
bool enableWritePrefetching() const override
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override
InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Estimate the overhead of scalarizing an instruction's operands.
bool isNumRegsMajorCostOfLSR() const override
BasicTTIImpl(const TargetMachine *TM, const Function &F)
size_type count() const
count - Returns the number of bits which are set.
Definition BitVector.h:181
BitVector & set()
Definition BitVector.h:370
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)
static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)
This class represents a range of values.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:325
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:310
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:321
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:803
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:352
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
const TargetLibraryInfo * getLibInfo() const
const SmallVectorImpl< Type * > & getArgTypes() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Machine Value Type.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:31
static StackOffset getScalable(int64_t Scalable)
Definition TypeSize.h:41
static StackOffset getFixed(int64_t Fixed)
Definition TypeSize.h:40
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:620
Multiway switch.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool isProfitableLSRChainElement(Instruction *I) const
virtual const DataLayout & getDataLayout() const
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
virtual std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
virtual bool shouldDropLSRSolutionIfLessProfitable() const
virtual bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
virtual std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
virtual std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
virtual unsigned getEpilogueVectorizationMinVF() const
virtual bool isLoweredToCall(const Function *F) const
virtual InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const
virtual InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const
virtual bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const
virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const
virtual InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, const Instruction *I) const
virtual TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
@ TCC_Expensive
The cost of a 'div' instruction on x86.
@ TCC_Basic
The cost of a typical 'add' instruction.
MemIndexedMode
The type of load/store indexing.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
CastContextHint
Represents a hint about the context in which a cast is used.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
CacheLevel
The possible cache levels.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:413
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
Definition Triple.cpp:1791
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:613
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:344
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:301
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
Definition Type.h:381
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Value * getOperand(unsigned i) const
Definition User.h:232
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
Provides info so a possible vectorization of a function can be computed.
bool isMasked() const
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:217
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
Definition APInt.cpp:3009
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
Definition ISDOpcodes.h:24
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:771
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition ISDOpcodes.h:387
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:511
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:410
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:744
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:275
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:347
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition ISDOpcodes.h:534
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:369
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:784
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:343
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:351
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:724
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:793
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition ISDOpcodes.h:732
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:933
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:527
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:360
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT RetVT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same...
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1150
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching ele...
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
ElementCount getVectorElementCount() const
Definition ValueTypes.h:350
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:65
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:323
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Attributes of a target dependent hardware loop.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
bool AllowPeeling
Allow peeling off loop iterations.
bool AllowLoopNestsPeeling
Allow peeling off loop iterations for loop nests.
bool PeelProfiledIterations
Allow peeling basing on profile.
unsigned PeelCount
A forced peeling factor (the number of bodied of the original loop that should be peeled off before t...
Parameters that control the generic loop unrolling transformation.
bool UpperBound
Allow using trip count upper bound to unroll loops.
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).