1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InstrTypes.h"
39#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Operator.h"
43#include "llvm/IR/Type.h"
44#include "llvm/IR/Value.h"
52#include <algorithm>
53#include <cassert>
54#include <cstdint>
55#include <limits>
56#include <optional>
57#include <utility>
58
59namespace llvm {
60
61class Function;
62class GlobalValue;
63class LLVMContext;
64class ScalarEvolution;
65class SCEV;
66class TargetMachine;
67
68extern cl::opt<unsigned> PartialUnrollingThreshold;
69
70/// Base class which can be used to help build a TTI implementation.
71///
72/// This class provides as much implementation of the TTI interface as is
73/// possible using the target independent parts of the code generator.
74///
75/// In order to subclass it, your class must implement a getST() method to
76/// return the subtarget, and a getTLI() method to return the target lowering.
77/// We need these methods implemented in the derived class so that this class
78/// doesn't have to duplicate storage for them.
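///
/// A minimal sketch of such a subclass (illustrative only, not taken from
/// this file; the members and constructor are assumptions):
/// \code
///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///     const TargetSubtargetInfo *ST; // owned by the target, not duplicated here
///     const TargetLoweringBase *TLI;
///   public:
///     const TargetSubtargetInfo *getST() const { return ST; }
///     const TargetLoweringBase *getTLI() const { return TLI; }
///   };
/// \endcode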
79template <typename T>
80class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
81private:
82 using BaseT = TargetTransformInfoImplCRTPBase<T>;
83 using TTI = TargetTransformInfo;
84
85 /// Helper function to access this as a T.
86 T *thisT() { return static_cast<T *>(this); }
87
88 /// Estimate a cost of Broadcast as an extract and sequence of insert
89 /// operations.
90 InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy,
91 TTI::TargetCostKind CostKind) {
92 InstructionCost Cost = 0;
93 // Broadcast cost is equal to the cost of extracting the zero'th element
94 // plus the cost of inserting it into every element of the result vector.
95 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
96 CostKind, 0, nullptr, nullptr);
97
98 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
99 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
100 CostKind, i, nullptr, nullptr);
101 }
102 return Cost;
103 }
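 // Illustrative example (not from this file): broadcasting lane 0 of a
 // <4 x float> this way is modelled as 1 extractelement plus 4
 // insertelements, so with unit per-element costs the estimate is 5.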
104
105 /// Estimate a cost of shuffle as a sequence of extract and insert
106 /// operations.
107 InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy,
108 TTI::TargetCostKind CostKind) {
109 InstructionCost Cost = 0;
110 // Shuffle cost is equal to the cost of extracting each element from its
111 // source vector plus the cost of inserting it into the result vector.
112
113 // e.g. a <4 x float> shuffle with mask <0,5,2,7> extracts index 0 of the
114 // first vector, index 1 of the second vector, index 2 of the first
115 // vector and index 3 of the second vector, and inserts them at indices
116 // <0,1,2,3> of the result vector.
117 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
118 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
119 CostKind, i, nullptr, nullptr);
120 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
121 CostKind, i, nullptr, nullptr);
122 }
123 return Cost;
124 }
125
126 /// Estimate a cost of subvector extraction as a sequence of extract and
127 /// insert operations.
128 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
129 TTI::TargetCostKind CostKind,
130 int Index,
131 FixedVectorType *SubVTy) {
132 assert(VTy && SubVTy &&
133 "Can only extract subvectors from vectors");
134 int NumSubElts = SubVTy->getNumElements();
135 assert((!isa<FixedVectorType>(VTy) ||
136 (Index + NumSubElts) <=
137 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
138 "SK_ExtractSubvector index out of range");
139
140 InstructionCost Cost = 0;
141 // Subvector extraction cost is equal to the cost of extracting elements
142 // from the source vector plus the cost of inserting them into the result
143 // vector.
144 for (int i = 0; i != NumSubElts; ++i) {
145 Cost +=
146 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
147 CostKind, i + Index, nullptr, nullptr);
148 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
149 CostKind, i, nullptr, nullptr);
150 }
151 return Cost;
152 }
153
154 /// Estimate a cost of subvector insertion as a sequence of extract and
155 /// insert operations.
156 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
157 TTI::TargetCostKind CostKind,
158 int Index,
159 FixedVectorType *SubVTy) {
160 assert(VTy && SubVTy &&
161 "Can only insert subvectors into vectors");
162 int NumSubElts = SubVTy->getNumElements();
163 assert((!isa<FixedVectorType>(VTy) ||
164 (Index + NumSubElts) <=
165 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
166 "SK_InsertSubvector index out of range");
167
168 InstructionCost Cost = 0;
169 // Subvector insertion cost is equal to the cost of extracting elements
170 // from the subvector plus the cost of inserting them into the result
171 // vector.
172 for (int i = 0; i != NumSubElts; ++i) {
173 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
174 CostKind, i, nullptr, nullptr);
175 Cost +=
176 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
177 i + Index, nullptr, nullptr);
178 }
179 return Cost;
180 }
181
182 /// Local query method delegates up to T which *must* implement this!
183 const TargetSubtargetInfo *getST() const {
184 return static_cast<const T *>(this)->getST();
185 }
186
187 /// Local query method delegates up to T which *must* implement this!
188 const TargetLoweringBase *getTLI() const {
189 return static_cast<const T *>(this)->getTLI();
190 }
191
192 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
193 switch (M) {
194 case TTI::MIM_Unindexed:
195 return ISD::UNINDEXED;
196 case TTI::MIM_PreInc:
197 return ISD::PRE_INC;
198 case TTI::MIM_PreDec:
199 return ISD::PRE_DEC;
200 case TTI::MIM_PostInc:
201 return ISD::POST_INC;
202 case TTI::MIM_PostDec:
203 return ISD::POST_DEC;
204 }
205 llvm_unreachable("Unexpected MemIndexedMode");
206 }
207
208 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
209 Align Alignment,
210 bool VariableMask,
211 bool IsGatherScatter,
212 TTI::TargetCostKind CostKind,
213 unsigned AddressSpace = 0) {
214 // We cannot scalarize scalable vectors, so return Invalid.
215 if (isa<ScalableVectorType>(DataTy))
216 return InstructionCost::getInvalid();
217
218 auto *VT = cast<FixedVectorType>(DataTy);
219 unsigned VF = VT->getNumElements();
220
221 // Assume the target does not have support for gather/scatter operations
222 // and provide a rough estimate.
223 //
224 // First, compute the cost of the individual memory operations.
225 InstructionCost AddrExtractCost =
226 IsGatherScatter
227 ? getScalarizationOverhead(
228 FixedVectorType::get(
229 PointerType::get(VT->getElementType(), 0), VF),
230 /*Insert=*/false, /*Extract=*/true, CostKind)
231 : 0;
232
233 // The cost of the scalar loads/stores.
234 InstructionCost MemoryOpCost =
235 VF * getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
236 AddressSpace, CostKind);
237
238 // Next, compute the cost of packing the result in a vector.
239 InstructionCost PackingCost =
240 getScalarizationOverhead(VT, Opcode != Instruction::Store,
241 Opcode == Instruction::Store, CostKind);
242
243 InstructionCost ConditionalCost = 0;
244 if (VariableMask) {
245 // Compute the cost of conditionally executing the memory operations with
246 // variable masks. This includes extracting the individual conditions,
247 // plus the branches and PHIs needed to combine the results.
248 // NOTE: Estimating the cost of conditionally executing the memory
249 // operations accurately is quite difficult and the current solution
250 // provides a very rough estimate only.
251 ConditionalCost =
252 getScalarizationOverhead(
253 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()), VF),
254 /*Insert=*/false, /*Extract=*/true, CostKind) +
255 VF * (getCFInstrCost(Instruction::Br, CostKind) +
256 getCFInstrCost(Instruction::PHI, CostKind));
257 }
258
259 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
260 }
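 // Illustrative example (not from this file): a masked gather of <4 x i32>
 // with a variable mask is estimated as 4 address extracts + 4 scalar
 // loads + the cost of packing the 4 results into a vector + 4
 // (mask extract + branch + PHI) triples for the conditional execution.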
261
262protected:
263 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
264 : BaseT(DL) {}
265 virtual ~BasicTTIImplBase() = default;
266
268
269public:
270 /// \name Scalar TTI Implementations
271 /// @{
272 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
273 unsigned AddressSpace, Align Alignment,
274 unsigned *Fast) const {
275 EVT E = EVT::getIntegerVT(Context, BitWidth);
276 return getTLI()->allowsMisalignedMemoryAccesses(
277 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
278 }
279
280 bool hasBranchDivergence(const Function *F = nullptr) { return false; }
281
282 bool isSourceOfDivergence(const Value *V) { return false; }
283
284 bool isAlwaysUniform(const Value *V) { return false; }
285
286 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
287 return false;
288 }
289
290 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const {
291 return true;
292 }
293
294 unsigned getFlatAddressSpace() {
295 // Return an invalid address space.
296 return -1;
297 }
298
299 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
300 Intrinsic::ID IID) const {
301 return false;
302 }
303
304 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
305 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
306 }
307
308 unsigned getAssumedAddrSpace(const Value *V) const {
309 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
310 }
311
312 bool isSingleThreaded() const {
313 return getTLI()->getTargetMachine().Options.ThreadModel ==
314 ThreadModel::Single;
315 }
316
317 std::pair<const Value *, unsigned>
318 getPredicatedAddrSpace(const Value *V) const {
319 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
320 }
321
322 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
323 Value *NewV) const {
324 return nullptr;
325 }
326
327 bool isLegalAddImmediate(int64_t imm) {
328 return getTLI()->isLegalAddImmediate(imm);
329 }
330
331 bool isLegalAddScalableImmediate(int64_t Imm) {
332 return getTLI()->isLegalAddScalableImmediate(Imm);
333 }
334
335 bool isLegalICmpImmediate(int64_t imm) {
336 return getTLI()->isLegalICmpImmediate(imm);
337 }
338
339 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
340 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
341 Instruction *I = nullptr,
342 int64_t ScalableOffset = 0) {
343 TargetLoweringBase::AddrMode AM;
344 AM.BaseGV = BaseGV;
345 AM.BaseOffs = BaseOffset;
346 AM.HasBaseReg = HasBaseReg;
347 AM.Scale = Scale;
348 AM.ScalableOffset = ScalableOffset;
349 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
350 }
351
352 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
353 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
354 }
355
356 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
357 Type *ScalarValTy) const {
358 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
359 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
360 EVT VT = getTLI()->getValueType(DL, SrcTy);
361 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
362 getTLI()->isOperationCustom(ISD::STORE, VT))
363 return true;
364
365 EVT ValVT =
366 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
367 EVT LegalizedVT =
368 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
369 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
370 };
371 while (VF > 2 && IsSupportedByTarget(VF))
372 VF /= 2;
373 return VF;
374 }
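 // Illustrative example (not from this file): with VF = 8 and i32 elements
 // on a target where <4 x i32> stores are legal but <2 x i32> stores are
 // not, the loop halves VF once (8 -> 4) and returns 4 as the minimum
 // profitable store VF.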
375
376 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
377 const DataLayout &DL) const {
378 EVT VT = getTLI()->getValueType(DL, Ty);
379 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
380 }
381
382 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
383 const DataLayout &DL) const {
384 EVT VT = getTLI()->getValueType(DL, Ty);
385 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
386 }
387
390 }
391
394 }
395
399 }
400
403 }
404
405 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
406 int64_t BaseOffset, bool HasBaseReg,
407 int64_t Scale, unsigned AddrSpace) {
408 TargetLoweringBase::AddrMode AM;
409 AM.BaseGV = BaseGV;
410 AM.BaseOffs = BaseOffset;
411 AM.HasBaseReg = HasBaseReg;
412 AM.Scale = Scale;
413 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
414 return 0;
415 return -1;
416 }
417
418 bool isTruncateFree(Type *Ty1, Type *Ty2) {
419 return getTLI()->isTruncateFree(Ty1, Ty2);
420 }
421
422 bool isProfitableToHoist(Instruction *I) {
423 return getTLI()->isProfitableToHoist(I);
424 }
425
426 bool useAA() const { return getST()->useAA(); }
427
428 bool isTypeLegal(Type *Ty) {
429 EVT VT = getTLI()->getValueType(DL, Ty);
430 return getTLI()->isTypeLegal(VT);
431 }
432
433 unsigned getRegUsageForType(Type *Ty) {
434 EVT ETy = getTLI()->getValueType(DL, Ty);
435 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
436 }
437
438 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
439 ArrayRef<const Value *> Operands, Type *AccessType,
440 TTI::TargetCostKind CostKind) {
441 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
442 }
443
444 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
445 unsigned &JumpTableSize,
446 ProfileSummaryInfo *PSI,
447 BlockFrequencyInfo *BFI) {
448 /// Try to find the estimated number of clusters. Note that the number of
449 /// clusters identified in this function could be different from the actual
450 /// numbers found in lowering. This function ignores switches that are
451 /// lowered with a mix of jump table / bit test / BTree. This function was
452 /// initially intended to be used when estimating the cost of a switch in
453 /// the inline cost heuristic, but it's a generic cost model to be used in other
454 /// places (e.g., in loop unrolling).
455 unsigned N = SI.getNumCases();
456 const TargetLoweringBase *TLI = getTLI();
457 const DataLayout &DL = this->getDataLayout();
458
459 JumpTableSize = 0;
460 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
461
462 // Early exit if neither a jump table nor a bit test is allowed.
463 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
464 return N;
465
466 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
467 APInt MinCaseVal = MaxCaseVal;
468 for (auto CI : SI.cases()) {
469 const APInt &CaseVal = CI.getCaseValue()->getValue();
470 if (CaseVal.sgt(MaxCaseVal))
471 MaxCaseVal = CaseVal;
472 if (CaseVal.slt(MinCaseVal))
473 MinCaseVal = CaseVal;
474 }
475
476 // Check if suitable for a bit test
477 if (N <= DL.getIndexSizeInBits(0u)) {
478 SmallPtrSet<const BasicBlock *, 4> Dests;
479 for (auto I : SI.cases())
480 Dests.insert(I.getCaseSuccessor());
481
482 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
483 DL))
484 return 1;
485 }
486
487 // Check if suitable for a jump table.
488 if (IsJTAllowed) {
489 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
490 return N;
491 uint64_t Range =
492 (MaxCaseVal - MinCaseVal)
493 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
494 // Check whether a range of clusters is dense enough for a jump table
495 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
496 JumpTableSize = Range;
497 return 1;
498 }
499 }
500 return N;
501 }
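 // Illustrative example (not from this file): a switch over cases
 // {0, 1, ..., 9} with distinct successors is dense, so on a target that
 // allows jump tables this returns 1 cluster with JumpTableSize = 10,
 // whereas a switch over {0, 1000000} simply returns the raw case count.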
502
503 bool shouldBuildLookupTables() {
504 const TargetLoweringBase *TLI = getTLI();
505 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
506 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
507 }
508
509 bool shouldBuildRelLookupTables() const {
510 const TargetMachine &TM = getTLI()->getTargetMachine();
511 // If non-PIC mode, do not generate a relative lookup table.
512 if (!TM.isPositionIndependent())
513 return false;
514
515 /// Relative lookup table entries consist of 32-bit offsets.
516 /// Do not generate relative lookup tables for large code models
517 /// in 64-bit architectures where 32-bit offsets might not be enough.
518 if (TM.getCodeModel() == CodeModel::Medium ||
519 TM.getCodeModel() == CodeModel::Large)
520 return false;
521
522 Triple TargetTriple = TM.getTargetTriple();
523 if (!TargetTriple.isArch64Bit())
524 return false;
525
526 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
527 // there.
528 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
529 return false;
530
531 return true;
532 }
533
534 bool haveFastSqrt(Type *Ty) {
535 const TargetLoweringBase *TLI = getTLI();
536 EVT VT = TLI->getValueType(DL, Ty);
537 return TLI->isTypeLegal(VT) &&
538 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
539 }
540
541 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
542 return true;
543 }
544
545 InstructionCost getFPOpCost(Type *Ty) {
546 // Check whether FADD is available, as a proxy for floating-point in
547 // general.
548 const TargetLoweringBase *TLI = getTLI();
549 EVT VT = TLI->getValueType(DL, Ty);
550 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
551 return TargetTransformInfo::TCC_Basic;
552 return TargetTransformInfo::TCC_Expensive;
553 }
554
555 bool preferToKeepConstantsAttached(const Instruction &Inst,
556 const Function &Fn) const {
557 switch (Inst.getOpcode()) {
558 default:
559 break;
560 case Instruction::SDiv:
561 case Instruction::SRem:
562 case Instruction::UDiv:
563 case Instruction::URem: {
564 if (!isa<ConstantInt>(Inst.getOperand(1)))
565 return false;
566 EVT VT = getTLI()->getValueType(DL, Inst.getType());
567 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
568 }
569 };
570
571 return false;
572 }
573
574 unsigned getInliningThresholdMultiplier() const { return 1; }
575 unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
576 unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const {
577 return 0;
578 }
579
580 int getInlinerVectorBonusPercent() const { return 150; }
581
582 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
583 TTI::UnrollingPreferences &UP,
584 OptimizationRemarkEmitter *ORE) {
585 // This unrolling functionality is target independent, but to provide some
586 // motivation for its intended use, for x86:
587
588 // According to the Intel 64 and IA-32 Architectures Optimization Reference
589 // Manual, Intel Core models and later have a loop stream detector (and
590 // associated uop queue) that can benefit from partial unrolling.
591 // The relevant requirements are:
592 // - The loop must have no more than 4 (8 for Nehalem and later) branches
593 // taken, and none of them may be calls.
594 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
595
596 // According to the Software Optimization Guide for AMD Family 15h
597 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
598 // and loop buffer which can benefit from partial unrolling.
599 // The relevant requirements are:
600 // - The loop must have fewer than 16 branches
601 // - The loop must have less than 40 uops in all executed loop branches
602
603 // The number of taken branches in a loop is hard to estimate here, and
604 // benchmarking has revealed that it is better not to be conservative when
605 // estimating the branch count. As a result, we'll ignore the branch limits
606 // until someone finds a case where it matters in practice.
607
608 unsigned MaxOps;
609 const TargetSubtargetInfo *ST = getST();
610 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
611 MaxOps = PartialUnrollingThreshold;
612 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
613 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
614 else
615 return;
616
617 // Scan the loop: don't unroll loops with calls.
618 for (BasicBlock *BB : L->blocks()) {
619 for (Instruction &I : *BB) {
620 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
621 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
622 if (!thisT()->isLoweredToCall(F))
623 continue;
624 }
625
626 if (ORE) {
627 ORE->emit([&]() {
628 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
629 L->getHeader())
630 << "advising against unrolling the loop because it "
631 "contains a "
632 << ore::NV("Call", &I);
633 });
634 }
635 return;
636 }
637 }
638 }
639
640 // Enable runtime and partial unrolling up to the specified size.
641 // Enable using trip count upper bound to unroll loops.
642 UP.Partial = UP.Runtime = UP.UpperBound = true;
643 UP.PartialThreshold = MaxOps;
644
645 // Avoid unrolling when optimizing for size.
646 UP.OptSizeThreshold = 0;
647 UP.PartialOptSizeThreshold = 0;
648
649 // Set number of instructions optimized when "back edge"
650 // becomes "fall through" to default value of 2.
651 UP.BEInsns = 2;
652 }
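 // Illustrative example (not from this file): on a subtarget reporting
 // LoopMicroOpBufferSize = 28, a call-free loop ends up with
 // UP.Partial/Runtime/UpperBound = true and UP.PartialThreshold = 28.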
653
654 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
655 TTI::PeelingPreferences &PP) {
656 PP.PeelCount = 0;
657 PP.AllowPeeling = true;
658 PP.AllowLoopNestsPeeling = false;
659 PP.PeelProfiledIterations = true;
660 }
661
662 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
663 AssumptionCache &AC,
664 TargetLibraryInfo *LibInfo,
665 HardwareLoopInfo &HWLoopInfo) {
666 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
667 }
668
669 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
670 return BaseT::preferPredicateOverEpilogue(TFI);
671 }
672
673 TailFoldingStyle
674 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) {
675 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
676 }
677
678 std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
679 IntrinsicInst &II) {
680 return BaseT::instCombineIntrinsic(IC, II);
681 }
682
683 std::optional<Value *>
684 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
685 APInt DemandedMask, KnownBits &Known,
686 bool &KnownBitsComputed) {
687 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
688 KnownBitsComputed);
689 }
690
691 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
692 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
693 APInt &UndefElts2, APInt &UndefElts3,
694 std::function<void(Instruction *, unsigned, APInt, APInt &)>
695 SimplifyAndSetOp) {
696 return BaseT::simplifyDemandedVectorEltsIntrinsic(
697 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
698 SimplifyAndSetOp);
699 }
700
701 virtual std::optional<unsigned>
702 getCacheSize(TargetTransformInfo::CacheLevel Level) const {
703 return std::optional<unsigned>(
704 getST()->getCacheSize(static_cast<unsigned>(Level)));
705 }
706
707 virtual std::optional<unsigned>
708 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
709 std::optional<unsigned> TargetResult =
710 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
711
712 if (TargetResult)
713 return TargetResult;
714
715 return BaseT::getCacheAssociativity(Level);
716 }
717
718 virtual unsigned getCacheLineSize() const {
719 return getST()->getCacheLineSize();
720 }
721
722 virtual unsigned getPrefetchDistance() const {
723 return getST()->getPrefetchDistance();
724 }
725
726 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
727 unsigned NumStridedMemAccesses,
728 unsigned NumPrefetches,
729 bool HasCall) const {
730 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
731 NumPrefetches, HasCall);
732 }
733
734 virtual unsigned getMaxPrefetchIterationsAhead() const {
735 return getST()->getMaxPrefetchIterationsAhead();
736 }
737
738 virtual bool enableWritePrefetching() const {
739 return getST()->enableWritePrefetching();
740 }
741
742 virtual bool shouldPrefetchAddressSpace(unsigned AS) const {
743 return getST()->shouldPrefetchAddressSpace(AS);
744 }
745
746 /// @}
747
748 /// \name Vector TTI Implementations
749 /// @{
750
751 TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
752 return TypeSize::getFixed(32);
753 }
754
755 std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
756 std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
757 bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
758
759 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
760 /// are set if the demanded result elements need to be inserted and/or
761 /// extracted from vectors.
762 InstructionCost getScalarizationOverhead(VectorType *InTy,
763 const APInt &DemandedElts,
764 bool Insert, bool Extract,
765 TTI::TargetCostKind CostKind) {
766 /// FIXME: a bitfield is not a reasonable abstraction for talking about
767 /// which elements are needed from a scalable vector
768 if (isa<ScalableVectorType>(InTy))
769 return InstructionCost::getInvalid();
770 auto *Ty = cast<FixedVectorType>(InTy);
771
772 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
773 "Vector size mismatch");
774
775 InstructionCost Cost = 0;
776
777 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
778 if (!DemandedElts[i])
779 continue;
780 if (Insert)
781 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
782 CostKind, i, nullptr, nullptr);
783 if (Extract)
784 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
785 CostKind, i, nullptr, nullptr);
786 }
787
788 return Cost;
789 }
790
791 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
792 InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
793 bool Extract,
794 TTI::TargetCostKind CostKind) {
795 if (isa<ScalableVectorType>(InTy))
796 return InstructionCost::getInvalid();
797 auto *Ty = cast<FixedVectorType>(InTy);
798
799 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
800 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
801 CostKind);
802 }
803
804 /// Estimate the overhead of scalarizing an instruction's unique
805 /// non-constant operands. The (potentially vector) types to use for each
806 /// argument are passed via Tys.
807 InstructionCost
808 getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
809 ArrayRef<Type *> Tys,
810 TTI::TargetCostKind CostKind) {
811 assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
812
813 InstructionCost Cost = 0;
814 SmallPtrSet<const Value*, 4> UniqueOperands;
815 for (int I = 0, E = Args.size(); I != E; I++) {
816 // Disregard things like metadata arguments.
817 const Value *A = Args[I];
818 Type *Ty = Tys[I];
819 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
820 !Ty->isPtrOrPtrVectorTy())
821 continue;
822
823 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
824 if (auto *VecTy = dyn_cast<VectorType>(Ty))
825 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
826 /*Extract*/ true, CostKind);
827 }
828 }
829
830 return Cost;
831 }
832
833 /// Estimate the overhead of scalarizing the inputs and outputs of an
834 /// instruction, with return type RetTy and arguments Args of type Tys. If
835 /// Args are unknown (empty), then the cost associated with one argument is
836 /// added as a heuristic.
837 InstructionCost getScalarizationOverhead(VectorType *RetTy,
838 ArrayRef<const Value *> Args,
839 ArrayRef<Type *> Tys,
840 TTI::TargetCostKind CostKind) {
841 InstructionCost Cost = getScalarizationOverhead(
842 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
843 if (!Args.empty())
844 Cost += getOperandsScalarizationOverhead(Args, Tys, CostKind);
845 else
846 // When no information on arguments is provided, we add the cost
847 // associated with one argument as a heuristic.
848 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
849 /*Extract*/ true, CostKind);
850
851 return Cost;
852 }
853
854 /// Estimate the cost of type-legalization and the legalized type.
855 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
856 LLVMContext &C = Ty->getContext();
857 EVT MTy = getTLI()->getValueType(DL, Ty);
858
859 InstructionCost Cost = 1;
860 // We keep legalizing the type until we find a legal kind. We assume that
861 // the only operation that costs anything is the split. After splitting
862 // we need to handle two types.
863 while (true) {
864 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
865
866 if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
867 // Ensure we return a sensible simple VT here, since many callers of
868 // this function require it.
869 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
870 return std::make_pair(InstructionCost::getInvalid(), VT);
871 }
872
873 if (LK.first == TargetLoweringBase::TypeLegal)
874 return std::make_pair(Cost, MTy.getSimpleVT());
875
876 if (LK.first == TargetLoweringBase::TypeSplitVector ||
877 LK.first == TargetLoweringBase::TypeExpandInteger)
878 Cost *= 2;
879
880 // Do not loop with f128 type.
881 if (MTy == LK.second)
882 return std::make_pair(Cost, MTy.getSimpleVT());
883
884 // Keep legalizing the type.
885 MTy = LK.second;
886 }
887 }
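 // Illustrative example (not from this file): if <16 x i32> is not legal
 // but <4 x i32> is, the loop splits twice (v16i32 -> v8i32 -> v4i32),
 // doubling the cost each time, and returns {4, v4i32}.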
888
889 unsigned getMaxInterleaveFactor(ElementCount VF) { return 1; }
890
891 InstructionCost getArithmeticInstrCost(
892 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
893 TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
894 TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
895 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
896 const Instruction *CxtI = nullptr) {
897 // Check if any of the operands are vector operands.
898 const TargetLoweringBase *TLI = getTLI();
899 int ISD = TLI->InstructionOpcodeToISD(Opcode);
900 assert(ISD && "Invalid opcode");
901
902 // TODO: Handle more cost kinds.
903 if (CostKind != TTI::TCK_RecipThroughput)
904 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
905 Opd1Info, Opd2Info,
906 Args, CxtI);
907
908 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
909
910 bool IsFloat = Ty->isFPOrFPVectorTy();
911 // Assume that floating point arithmetic operations cost twice as much as
912 // integer operations.
913 InstructionCost OpCost = (IsFloat ? 2 : 1);
914
915 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
916 // The operation is legal. Assume it costs 1.
917 // TODO: Once we have extract/insert subvector cost we need to use them.
918 return LT.first * OpCost;
919 }
920
921 if (!TLI->isOperationExpand(ISD, LT.second)) {
922 // If the operation is custom lowered, then assume that the code is twice
923 // as expensive.
924 return LT.first * 2 * OpCost;
925 }
926
927 // An 'Expand' of URem and SRem is special because it may default
928 // to expanding the operation into a sequence of sub-operations
929 // i.e. X % Y -> X-(X/Y)*Y.
930 if (ISD == ISD::UREM || ISD == ISD::SREM) {
931 bool IsSigned = ISD == ISD::SREM;
932 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
933 LT.second) ||
934 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
935 LT.second)) {
936 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
937 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
938 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
939 InstructionCost MulCost =
940 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
941 InstructionCost SubCost =
942 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
943 return DivCost + MulCost + SubCost;
944 }
945 }
946
947 // We cannot scalarize scalable vectors, so return Invalid.
948 if (isa<ScalableVectorType>(Ty))
949 return InstructionCost::getInvalid();
950
951 // Else, assume that we need to scalarize this op.
952 // TODO: If one of the types get legalized by splitting, handle this
953 // similarly to what getCastInstrCost() does.
954 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
955 InstructionCost Cost = thisT()->getArithmeticInstrCost(
956 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
957 Args, CxtI);
958 // Return the cost of multiple scalar invocation plus the cost of
959 // inserting and extracting the values.
960 SmallVector<Type *> Tys(Args.size(), Ty);
961 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
962 VTy->getNumElements() * Cost;
963 }
964
965 // We don't know anything about this scalar instruction.
966 return OpCost;
967 }
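 // Illustrative example (not from this file): on a target where i32 SREM
 // expands but SDIV is legal, a scalar srem is modelled as
 // cost(sdiv) + cost(mul) + cost(sub), following X % Y == X - (X / Y) * Y.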
968
969 TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
970 ArrayRef<int> Mask,
971 VectorType *Ty, int &Index,
972 VectorType *&SubTy) const {
973 if (Mask.empty())
974 return Kind;
975 int NumSrcElts = Ty->getElementCount().getKnownMinValue();
976 switch (Kind) {
977 case TTI::SK_PermuteSingleSrc:
978 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
979 return TTI::SK_Reverse;
980 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
981 return TTI::SK_Broadcast;
982 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
983 (Index + Mask.size()) <= (size_t)NumSrcElts) {
984 SubTy = FixedVectorType::get(Ty->getElementType(), Mask.size());
985 return TTI::SK_ExtractSubvector;
986 }
987 break;
988 case TTI::SK_PermuteTwoSrc: {
989 int NumSubElts;
990 if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask(
991 Mask, NumSrcElts, NumSubElts, Index)) {
992 if (Index + NumSubElts > NumSrcElts)
993 return Kind;
994 SubTy = FixedVectorType::get(Ty->getElementType(), NumSubElts);
995 return TTI::SK_InsertSubvector;
996 }
997 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
998 return TTI::SK_Select;
999 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1000 return TTI::SK_Transpose;
1001 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1002 return TTI::SK_Splice;
1003 break;
1004 }
1005 case TTI::SK_Select:
1006 case TTI::SK_Reverse:
1007 case TTI::SK_Broadcast:
1008 case TTI::SK_Transpose:
1011 case TTI::SK_Splice:
1012 break;
1013 }
1014 return Kind;
1015 }
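 // Illustrative examples (not from this file): on a <4 x i32> shuffle,
 // mask <3,2,1,0> is refined to SK_Reverse, mask <0,0,0,0> to
 // SK_Broadcast, and mask <2,3> to SK_ExtractSubvector at index 2.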
1016
1017 InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
1018 ArrayRef<int> Mask,
1019 TTI::TargetCostKind CostKind, int Index,
1020 VectorType *SubTp,
1021 ArrayRef<const Value *> Args = std::nullopt,
1022 const Instruction *CxtI = nullptr) {
1023 switch (improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp)) {
1024 case TTI::SK_Broadcast:
1025 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
1026 return getBroadcastShuffleOverhead(FVT, CostKind);
1027 return InstructionCost::getInvalid();
1028 case TTI::SK_Select:
1029 case TTI::SK_Splice:
1030 case TTI::SK_Reverse:
1031 case TTI::SK_Transpose:
1032 case TTI::SK_PermuteSingleSrc:
1033 case TTI::SK_PermuteTwoSrc:
1034 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
1035 return getPermuteShuffleOverhead(FVT, CostKind);
1036 return InstructionCost::getInvalid();
1037 case TTI::SK_ExtractSubvector:
1038 return getExtractSubvectorOverhead(Tp, CostKind, Index,
1039 cast<FixedVectorType>(SubTp));
1040 case TTI::SK_InsertSubvector:
1041 return getInsertSubvectorOverhead(Tp, CostKind, Index,
1042 cast<FixedVectorType>(SubTp));
1043 }
1044 llvm_unreachable("Unknown TTI::ShuffleKind");
1045 }
1046
1047 InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1048 TTI::CastContextHint CCH,
1049 TTI::TargetCostKind CostKind,
1050 const Instruction *I = nullptr) {
1051 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1052 return 0;
1053
1054 const TargetLoweringBase *TLI = getTLI();
1055 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1056 assert(ISD && "Invalid opcode");
1057 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1058 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1059
1060 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1061 TypeSize DstSize = DstLT.second.getSizeInBits();
1062 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1063 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1064
1065 switch (Opcode) {
1066 default:
1067 break;
1068 case Instruction::Trunc:
1069 // Check for NOOP conversions.
1070 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1071 return 0;
1072 [[fallthrough]];
1073 case Instruction::BitCast:
1074 // Bitcast between types that are legalized to the same type are free and
1075 // assume int to/from ptr of the same size is also free.
1076 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1077 SrcSize == DstSize)
1078 return 0;
1079 break;
1080 case Instruction::FPExt:
1081 if (I && getTLI()->isExtFree(I))
1082 return 0;
1083 break;
1084 case Instruction::ZExt:
1085 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1086 return 0;
1087 [[fallthrough]];
1088 case Instruction::SExt:
1089 if (I && getTLI()->isExtFree(I))
1090 return 0;
1091
1092 // If this is a zext/sext of a load, return 0 if the corresponding
1093 // extending load exists on target and the result type is legal.
1094 if (CCH == TTI::CastContextHint::Normal) {
1095 EVT ExtVT = EVT::getEVT(Dst);
1096 EVT LoadVT = EVT::getEVT(Src);
1097 unsigned LType =
1098 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1099 if (DstLT.first == SrcLT.first &&
1100 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1101 return 0;
1102 }
1103 break;
1104 case Instruction::AddrSpaceCast:
1105 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1106 Dst->getPointerAddressSpace()))
1107 return 0;
1108 break;
1109 }
1110
1111 auto *SrcVTy = dyn_cast<VectorType>(Src);
1112 auto *DstVTy = dyn_cast<VectorType>(Dst);
1113
1114 // If the cast is marked as legal (or promote) then assume low cost.
1115 if (SrcLT.first == DstLT.first &&
1116 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1117 return SrcLT.first;
1118
1119 // Handle scalar conversions.
1120 if (!SrcVTy && !DstVTy) {
1121 // Just check the op cost. If the operation is legal then assume it costs
1122 // 1.
1123 if (!TLI->isOperationExpand(ISD, DstLT.second))
1124 return 1;
1125
1126 // Assume that illegal scalar instructions are expensive.
1127 return 4;
1128 }
1129
1130 // Check vector-to-vector casts.
1131 if (DstVTy && SrcVTy) {
1132 // If the cast is between same-sized registers, then the check is simple.
1133 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1134
1135 // Assume that Zext is done using AND.
1136 if (Opcode == Instruction::ZExt)
1137 return SrcLT.first;
1138
1139 // Assume that sext is done using SHL and SRA.
1140 if (Opcode == Instruction::SExt)
1141 return SrcLT.first * 2;
1142
1143 // Just check the op cost. If the operation is legal then assume it
1144 // costs 1 and multiply by the type-legalization overhead.
1146 if (!TLI->isOperationExpand(ISD, DstLT.second))
1147 return SrcLT.first * 1;
1148 }
1149
1150 // If we are legalizing by splitting, query the concrete TTI for the cost
1151 // of casting the original vector twice. We also need to factor in the
1152 // cost of the split itself. Count that as 1, to be consistent with
1153 // getTypeLegalizationCost().
1154 bool SplitSrc =
1155 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1156 TargetLowering::TypeSplitVector;
1157 bool SplitDst =
1158 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1159 TargetLowering::TypeSplitVector;
1160 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1161 DstVTy->getElementCount().isVector()) {
1162 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1163 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1164 T *TTI = static_cast<T *>(this);
1165 // If both types need to be split then the split is free.
1166 InstructionCost SplitCost =
1167 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1168 return SplitCost +
1169 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1170 CostKind, I));
1171 }
1172
1173 // Scalarization cost is Invalid, can't assume any num elements.
1174 if (isa<ScalableVectorType>(DstVTy))
1175 return InstructionCost::getInvalid();
1176
1177 // In other cases where the source or destination are illegal, assume
1178 // the operation will get scalarized.
1179 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1180 InstructionCost Cost = thisT()->getCastInstrCost(
1181 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1182
1183 // Return the cost of multiple scalar invocation plus the cost of
1184 // inserting and extracting the values.
1185 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1186 CostKind) +
1187 Num * Cost;
1188 }
1189
1190 // We already handled vector-to-vector and scalar-to-scalar conversions.
1191 // This is where we handle bitcasts between vectors and scalars. We need
1192 // to assume that the conversion is scalarized in one way or another.
1194 if (Opcode == Instruction::BitCast) {
1195 // Illegal bitcasts are done by storing and loading from a stack slot.
1196 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1197 /*Extract*/ true, CostKind)
1198 : 0) +
1199 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1200 /*Extract*/ false, CostKind)
1201 : 0);
1202 }
1203
1204 llvm_unreachable("Unhandled cast");
1205 }
1206
1207 InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1208 VectorType *VecTy, unsigned Index) {
1209 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1210 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1211 CostKind, Index, nullptr, nullptr) +
1212 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1213 TTI::CastContextHint::None, CostKind);
1214 }
1215
1216 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
1217 const Instruction *I = nullptr) {
1218 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1219 }
1220
1221 InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1222 CmpInst::Predicate VecPred,
1224 const Instruction *I = nullptr) {
1225 const TargetLoweringBase *TLI = getTLI();
1226 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1227 assert(ISD && "Invalid opcode");
1228
1229 // TODO: Handle other cost kinds.
1230 if (CostKind != TTI::TCK_RecipThroughput)
1231 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1232 I);
1233
1234 // Selects on vectors are actually vector selects.
1235 if (ISD == ISD::SELECT) {
1236 assert(CondTy && "CondTy must exist");
1237 if (CondTy->isVectorTy())
1238 ISD = ISD::VSELECT;
1239 }
1240 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1241
1242 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1243 !TLI->isOperationExpand(ISD, LT.second)) {
1244 // The operation is legal. Assume it costs 1. Multiply
1245 // by the type-legalization overhead.
1246 return LT.first * 1;
1247 }
1248
1249 // Otherwise, assume that the cast is scalarized.
1250 // TODO: If one of the types get legalized by splitting, handle this
1251 // similarly to what getCastInstrCost() does.
1252 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1253 if (isa<ScalableVectorType>(ValTy))
1254 return InstructionCost::getInvalid();
1255
1256 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1257 if (CondTy)
1258 CondTy = CondTy->getScalarType();
1259 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1260 Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
1261
1262 // Return the cost of multiple scalar invocation plus the cost of
1263 // inserting and extracting the values.
1264 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1265 /*Extract*/ false, CostKind) +
1266 Num * Cost;
1267 }
1268
1269 // Unknown scalar opcode.
1270 return 1;
1271 }
1272
1273 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1274 TTI::TargetCostKind CostKind,
1275 unsigned Index, Value *Op0, Value *Op1) {
1276 return getRegUsageForType(Val->getScalarType());
1277 }
1278
1279 InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
1280 TTI::TargetCostKind CostKind,
1281 unsigned Index) {
1282 Value *Op0 = nullptr;
1283 Value *Op1 = nullptr;
1284 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1285 Op0 = IE->getOperand(0);
1286 Op1 = IE->getOperand(1);
1287 }
1288 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1289 Op1);
1290 }
1291
1292 InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
1293 int VF,
1294 const APInt &DemandedDstElts,
1295 TTI::TargetCostKind CostKind) {
1296 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1297 "Unexpected size of DemandedDstElts.");
1298
1299 InstructionCost Cost;
1300
1301 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1302 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1303
1304 // The Mask shuffling cost is extract all the elements of the Mask
1305 // and insert each of them Factor times into the wide vector:
1306 //
1307 // E.g. an interleaved group with factor 3:
1308 // %mask = icmp ult <8 x i32> %vec1, %vec2
1309 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1310 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1311 // The cost is estimated as extract all mask elements from the <8xi1> mask
1312 // vector and insert them factor times into the <24xi1> shuffled mask
1313 // vector.
1314 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1315 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1316 /*Insert*/ false,
1317 /*Extract*/ true, CostKind);
1318 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1319 /*Insert*/ true,
1320 /*Extract*/ false, CostKind);
1321
1322 return Cost;
1323 }
1324
1325 InstructionCost
1326 getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
1327 unsigned AddressSpace, TTI::TargetCostKind CostKind,
1328 TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
1329 const Instruction *I = nullptr) {
1330 assert(!Src->isVoidTy() && "Invalid type");
1331 // Assume types, such as structs, are expensive.
1332 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1333 return 4;
1334 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1335
1336 // Assuming that all loads of legal types cost 1.
1337 InstructionCost Cost = LT.first;
1338 if (CostKind != TTI::TCK_RecipThroughput)
1339 return Cost;
1340
1341 const DataLayout &DL = this->getDataLayout();
1342 if (Src->isVectorTy() &&
1343 // In practice it's not currently possible to have a change in lane
1344 // length for extending loads or truncating stores so both types should
1345 // have the same scalable property.
1346 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1347 LT.second.getSizeInBits())) {
1348 // This is a vector load that legalizes to a larger type than the vector
1349 // itself. Unless the corresponding extending load or truncating store is
1350 // legal, then this will scalarize.
1351 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
1352 EVT MemVT = getTLI()->getValueType(DL, Src);
1353 if (Opcode == Instruction::Store)
1354 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1355 else
1356 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1357
1358 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1359 // This is a vector load/store for some illegal type that is scalarized.
1360 // We must account for the cost of building or decomposing the vector.
1361 Cost += getScalarizationOverhead(
1362 cast<VectorType>(Src), Opcode != Instruction::Store,
1363 Opcode == Instruction::Store, CostKind);
1364 }
1365 }
1366
1367 return Cost;
1368 }
1369
1370 InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
1371 Align Alignment, unsigned AddressSpace,
1372 TTI::TargetCostKind CostKind) {
1373 // TODO: Pass on AddressSpace when we have test coverage.
1374 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1375 CostKind);
1376 }
1377
1378 InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1379 const Value *Ptr, bool VariableMask,
1380 Align Alignment,
1381 TTI::TargetCostKind CostKind,
1382 const Instruction *I = nullptr) {
1383 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1384 true, CostKind);
1385 }
1386
1387 InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
1388 const Value *Ptr, bool VariableMask,
1389 Align Alignment,
1390 TTI::TargetCostKind CostKind,
1391 const Instruction *I) {
1392 // For a target without strided memory operations (or for an illegal
1393 // operation type on one which does), assume we lower to a gather/scatter
1394 // operation. (Which may in turn be scalarized.)
1395 return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1396 Alignment, CostKind, I);
1397 }
1398
1399 InstructionCost getInterleavedMemoryOpCost(
1400 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1401 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1402 bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1403
1404 // We cannot scalarize scalable vectors, so return Invalid.
1405 if (isa<ScalableVectorType>(VecTy))
1406 return InstructionCost::getInvalid();
1407
1408 auto *VT = cast<FixedVectorType>(VecTy);
1409
1410 unsigned NumElts = VT->getNumElements();
1411 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1412
1413 unsigned NumSubElts = NumElts / Factor;
1414 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1415
1416 // First, the cost of the load/store operation.
1417 InstructionCost Cost;
1418 if (UseMaskForCond || UseMaskForGaps)
1419 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1420 AddressSpace, CostKind);
1421 else
1422 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1423 CostKind);
1424
1425 // Legalize the vector type, and get the legalized and unlegalized type
1426 // sizes.
1427 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1428 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1429 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1430
1431 // Scale the cost of the memory operation by the fraction of legalized
1432 // instructions that will actually be used. We shouldn't account for the
1433 // cost of dead instructions since they will be removed.
1434 //
1435 // E.g., An interleaved load of factor 8:
1436 // %vec = load <16 x i64>, <16 x i64>* %ptr
1437 // %v0 = shufflevector %vec, undef, <0, 8>
1438 //
1439 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1440 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1441 // type). The other loads are unused.
1442 //
1443 // TODO: Note that legalization can turn masked loads/stores into unmasked
1444 // (legalized) loads/stores. This can be reflected in the cost.
1445 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1446 // The number of loads of a legal type it will take to represent a load
1447 // of the unlegalized vector type.
1448 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1449
1450 // The number of elements of the unlegalized type that correspond to a
1451 // single legal instruction.
1452 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1453
1454 // Determine which legal instructions will be used.
1455 BitVector UsedInsts(NumLegalInsts, false);
1456 for (unsigned Index : Indices)
1457 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1458 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1459
1460 // Scale the cost of the load by the fraction of legal instructions that
1461 // will be used.
1462 Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
1463 }
1464
1465 // Then add the cost of the interleave operation.
1466 assert(Indices.size() <= Factor &&
1467 "Interleaved memory op has too many members");
1468
1469 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1470 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1471
1472 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1473 for (unsigned Index : Indices) {
1474 assert(Index < Factor && "Invalid index for interleaved memory op");
1475 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1476 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1477 }
1478
1479 if (Opcode == Instruction::Load) {
1480 // The interleave cost is similar to extract sub vectors' elements
1481 // from the wide vector, and insert them into sub vectors.
1482 //
1483 // E.g. An interleaved load of factor 2 (with one member of index 0):
1484 // %vec = load <8 x i32>, <8 x i32>* %ptr
1485 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1486 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1487 // <8 x i32> vector and insert them into a <4 x i32> vector.
1488 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1489 SubVT, DemandedAllSubElts,
1490 /*Insert*/ true, /*Extract*/ false, CostKind);
1491 Cost += Indices.size() * InsSubCost;
1492 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1493 /*Insert*/ false,
1494 /*Extract*/ true, CostKind);
1495 } else {
1496 // The interleave cost is extract elements from sub vectors, and
1497 // insert them into the wide vector.
1498 //
1499 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1500 // (using VF=4):
1501 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1502 // %gaps.mask = <true, true, false, true, true, false,
1503 // true, true, false, true, true, false>
1504 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1505 // i32 Align, <12 x i1> %gaps.mask
1506 // The cost is estimated as extract all elements (of actual members,
1507 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1508 // i32> vector.
1509 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1510 SubVT, DemandedAllSubElts,
1511 /*Insert*/ false, /*Extract*/ true, CostKind);
1512 Cost += ExtSubCost * Indices.size();
1513 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1514 /*Insert*/ true,
1515 /*Extract*/ false, CostKind);
1516 }
1517
1518 if (!UseMaskForCond)
1519 return Cost;
1520
1521 Type *I8Type = Type::getInt8Ty(VT->getContext());
1522
1523 Cost += thisT()->getReplicationShuffleCost(
1524 I8Type, Factor, NumSubElts,
1525 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1526 CostKind);
1527
1528 // The Gaps mask is invariant and created outside the loop, therefore the
1529 // cost of creating it is not accounted for here. However if we have both
1530 // a MaskForGaps and some other mask that guards the execution of the
1531 // memory access, we need to account for the cost of And-ing the two masks
1532 // inside the loop.
1533 if (UseMaskForGaps) {
1534 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1535 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1536 CostKind);
1537 }
1538
1539 return Cost;
1540 }
1541
1542 /// Get intrinsic cost based on arguments.
1543 InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1544 TTI::TargetCostKind CostKind) {
1545 // Check for generically free intrinsics.
1546 if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
1547 return 0;
1548
1549 // Assume that target intrinsics are cheap.
1550 Intrinsic::ID IID = ICA.getID();
1551 if (Function::isTargetIntrinsic(IID))
1552 return TargetTransformInfo::TCC_Basic;
1553
1554 if (ICA.isTypeBasedOnly())
1555 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
1556
1557 Type *RetTy = ICA.getReturnType();
1558
1559 ElementCount RetVF =
1560 (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
1561 : ElementCount::getFixed(1));
1562 const IntrinsicInst *I = ICA.getInst();
1563 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1564 FastMathFlags FMF = ICA.getFlags();
1565 switch (IID) {
1566 default:
1567 break;
1568
1569 case Intrinsic::powi:
1570 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1571 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1572 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1573 ShouldOptForSize)) {
1574 // The cost is modeled on the expansion performed by ExpandPowI in
1575 // SelectionDAGBuilder.
1576 APInt Exponent = RHSC->getValue().abs();
1577 unsigned ActiveBits = Exponent.getActiveBits();
1578 unsigned PopCount = Exponent.popcount();
1579 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1580 thisT()->getArithmeticInstrCost(
1581 Instruction::FMul, RetTy, CostKind);
1582 if (RHSC->isNegative())
1583 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1584 CostKind);
1585 return Cost;
1586 }
1587 }
1588 break;
1589 case Intrinsic::cttz:
1590 // FIXME: If necessary, this should go in target-specific overrides.
1591 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1592 return TargetTransformInfo::TCC_Basic;
1593 break;
1594
1595 case Intrinsic::ctlz:
1596 // FIXME: If necessary, this should go in target-specific overrides.
1597 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1598 return TargetTransformInfo::TCC_Basic;
1599 break;
1600
1601 case Intrinsic::memcpy:
1602 return thisT()->getMemcpyCost(ICA.getInst());
1603
1604 case Intrinsic::masked_scatter: {
1605 const Value *Mask = Args[3];
1606 bool VarMask = !isa<Constant>(Mask);
1607 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
1608 return thisT()->getGatherScatterOpCost(Instruction::Store,
1609 ICA.getArgTypes()[0], Args[1],
1610 VarMask, Alignment, CostKind, I);
1611 }
1612 case Intrinsic::masked_gather: {
1613 const Value *Mask = Args[2];
1614 bool VarMask = !isa<Constant>(Mask);
1615 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
1616 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1617 VarMask, Alignment, CostKind, I);
1618 }
1619 case Intrinsic::experimental_vp_strided_store: {
1620 const Value *Data = Args[0];
1621 const Value *Ptr = Args[1];
1622 const Value *Mask = Args[3];
1623 const Value *EVL = Args[4];
1624 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1625 Align Alignment = I->getParamAlign(1).valueOrOne();
1626 return thisT()->getStridedMemoryOpCost(Instruction::Store,
1627 Data->getType(), Ptr, VarMask,
1628 Alignment, CostKind, I);
1629 }
1630 case Intrinsic::experimental_vp_strided_load: {
1631 const Value *Ptr = Args[0];
1632 const Value *Mask = Args[2];
1633 const Value *EVL = Args[3];
1634 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1635 Align Alignment = I->getParamAlign(0).valueOrOne();
1636 return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
1637 VarMask, Alignment, CostKind, I);
1638 }
1639 case Intrinsic::experimental_stepvector: {
1640 if (isa<ScalableVectorType>(RetTy))
1641 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1642 // The cost of materialising a constant integer vector.
1643 return TargetTransformInfo::TCC_Basic;
1644 }
1645 case Intrinsic::vector_extract: {
1646 // FIXME: Handle case where a scalable vector is extracted from a scalable
1647 // vector
1648 if (isa<ScalableVectorType>(RetTy))
1649 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1650 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1651 return thisT()->getShuffleCost(
1652 TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
1653 std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
1654 }
1655 case Intrinsic::vector_insert: {
1656 // FIXME: Handle case where a scalable vector is inserted into a scalable
1657 // vector
1658 if (isa<ScalableVectorType>(Args[1]->getType()))
1659 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1660 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1661 return thisT()->getShuffleCost(
1662 TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
1663 std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
1664 }
1665 case Intrinsic::experimental_vector_reverse: {
1666 return thisT()->getShuffleCost(
1667 TTI::SK_Reverse, cast<VectorType>(Args[0]->getType()), std::nullopt,
1668 CostKind, 0, cast<VectorType>(RetTy));
1669 }
1670 case Intrinsic::experimental_vector_splice: {
1671 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1672 return thisT()->getShuffleCost(
1673 TTI::SK_Splice, cast<VectorType>(Args[0]->getType()), std::nullopt,
1674 CostKind, Index, cast<VectorType>(RetTy));
1675 }
1676 case Intrinsic::vector_reduce_add:
1677 case Intrinsic::vector_reduce_mul:
1678 case Intrinsic::vector_reduce_and:
1679 case Intrinsic::vector_reduce_or:
1680 case Intrinsic::vector_reduce_xor:
1681 case Intrinsic::vector_reduce_smax:
1682 case Intrinsic::vector_reduce_smin:
1683 case Intrinsic::vector_reduce_fmax:
1684 case Intrinsic::vector_reduce_fmin:
1685 case Intrinsic::vector_reduce_fmaximum:
1686 case Intrinsic::vector_reduce_fminimum:
1687 case Intrinsic::vector_reduce_umax:
1688 case Intrinsic::vector_reduce_umin: {
1689 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
1690 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1691 }
1692 case Intrinsic::vector_reduce_fadd:
1693 case Intrinsic::vector_reduce_fmul: {
1694 IntrinsicCostAttributes Attrs(
1695 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
1696 return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1697 }
1698 case Intrinsic::fshl:
1699 case Intrinsic::fshr: {
1700 const Value *X = Args[0];
1701 const Value *Y = Args[1];
1702 const Value *Z = Args[2];
1703 const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
1704 const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
1705 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
1706 const TTI::OperandValueInfo OpInfoBW =
1707 {TTI::OK_UniformConstantValue,
1708 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
1709 : TTI::OP_None};
1710
1711 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
1712 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
1713 InstructionCost Cost = 0;
1714 Cost +=
1715 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
1716 Cost +=
1717 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
1718 Cost += thisT()->getArithmeticInstrCost(
1719 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
1720 {OpInfoZ.Kind, TTI::OP_None});
1721 Cost += thisT()->getArithmeticInstrCost(
1722 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
1723 {OpInfoZ.Kind, TTI::OP_None});
1724 // Non-constant shift amounts require a modulo.
1725 if (!OpInfoZ.isConstant())
1726 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
1727 CostKind, OpInfoZ, OpInfoBW);
1728 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
1729 if (X != Y) {
1730 Type *CondTy = RetTy->getWithNewBitWidth(1);
1731 Cost +=
1732 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1733 CmpInst::ICMP_EQ, CostKind);
1734 Cost +=
1735 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1736 CmpInst::ICMP_EQ, CostKind);
1737 }
1738 return Cost;
1739 }
1740 case Intrinsic::get_active_lane_mask: {
1741 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
1742 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
1743
1744 // If we're not expanding the intrinsic then we assume this is cheap
1745 // to implement.
1746 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
1747 return getTypeLegalizationCost(RetTy).first;
1748 }
1749
1750 // Create the expanded types that will be used to calculate the uadd_sat
1751 // operation.
1752 Type *ExpRetTy = VectorType::get(
1753 ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
1754 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
1755 InstructionCost Cost =
1756 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1757 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
1758 CmpInst::ICMP_ULT, CostKind);
1759 return Cost;
1760 }
1761 }
1762
1763 // VP Intrinsics should have the same cost as their non-vp counterpart.
1764 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1765 // counterpart when the vector length argument is smaller than the maximum
1766 // vector length.
1767 // TODO: Support other kinds of VPIntrinsics
1768 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1769 std::optional<unsigned> FOp =
1770 VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
1771 if (FOp) {
1772 if (ICA.getID() == Intrinsic::vp_load) {
1773 Align Alignment;
1774 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1775 Alignment = VPI->getPointerAlignment().valueOrOne();
1776 unsigned AS = 0;
1777 if (ICA.getArgs().size() > 1)
1778 if (auto *PtrTy =
1779 dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
1780 AS = PtrTy->getAddressSpace();
1781 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1782 AS, CostKind);
1783 }
1784 if (ICA.getID() == Intrinsic::vp_store) {
1785 Align Alignment;
1786 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1787 Alignment = VPI->getPointerAlignment().valueOrOne();
1788 unsigned AS = 0;
1789 if (ICA.getArgs().size() >= 2)
1790 if (auto *PtrTy =
1791 dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
1792 AS = PtrTy->getAddressSpace();
1793 return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
1794 AS, CostKind);
1795 }
1796 if (VPBinOpIntrinsic::isVPBinOp(ICA.getID())) {
1797 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1798 CostKind);
1799 }
1800 }
1801
1802 std::optional<Intrinsic::ID> FID =
1803 VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
1804 if (FID) {
1805 // Non-vp version will have same Args/Tys except mask and vector length.
1806 assert(ICA.getArgs().size() >= 2 && ICA.getArgTypes().size() >= 2 &&
1807 "Expected VPIntrinsic to have Mask and Vector Length args and "
1808 "types");
1809 ArrayRef<Type *> NewTys = ArrayRef(ICA.getArgTypes()).drop_back(2);
1810
1811 // VPReduction intrinsics have a start value argument that their non-vp
1812 // counterparts do not have, except for the fadd and fmul non-vp
1813 // counterpart.
1814 if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
1815 *FID != Intrinsic::vector_reduce_fadd &&
1816 *FID != Intrinsic::vector_reduce_fmul)
1817 NewTys = NewTys.drop_front();
1818
1819 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
1820 ICA.getFlags());
1821 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1822 }
1823 }
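// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// Example of the VP mapping above: a call such as
//   llvm.vp.add.v4i32(%a, %b, %mask, %evl)
// has a functional opcode (Add) and is costed like a plain <4 x i32> add,
// while llvm.vp.reduce.add drops its start-value type and is costed as
// llvm.vector.reduce.add; vp.reduce.fadd/fmul keep the start value because
// their non-vp counterparts also take one.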
1824
1825 // Assume that we need to scalarize this intrinsic.
1826 // Compute the scalarization overhead based on Args for a vector
1827 // intrinsic.
1828 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
1829 if (RetVF.isVector() && !RetVF.isScalable()) {
1830 ScalarizationCost = 0;
1831 if (!RetTy->isVoidTy())
1832 ScalarizationCost += getScalarizationOverhead(
1833 cast<VectorType>(RetTy),
1834 /*Insert*/ true, /*Extract*/ false, CostKind);
1835 ScalarizationCost +=
1836 getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
1837 }
1838
1839 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
1840 ScalarizationCost);
1841 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1842 }
1843
1844 /// Get intrinsic cost based on argument types.
1845 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
1846 /// cost of scalarizing the arguments and the return value will be computed
1847 /// based on types.
1848 InstructionCost
1849 getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1850 TTI::TargetCostKind CostKind) {
1851 Intrinsic::ID IID = ICA.getID();
1852 Type *RetTy = ICA.getReturnType();
1853 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
1854 FastMathFlags FMF = ICA.getFlags();
1855 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
1856 bool SkipScalarizationCost = ICA.skipScalarizationCost();
1857
1858 VectorType *VecOpTy = nullptr;
1859 if (!Tys.empty()) {
1860 // The vector reduction operand is operand 0 except for fadd/fmul.
1861 // Their operand 0 is a scalar start value, so the vector op is operand 1.
1862 unsigned VecTyIndex = 0;
1863 if (IID == Intrinsic::vector_reduce_fadd ||
1864 IID == Intrinsic::vector_reduce_fmul)
1865 VecTyIndex = 1;
1866 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
1867 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
1868 }
1869
1870 // Library call cost - other than size, make it expensive.
1871 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
1872 unsigned ISD = 0;
1873 switch (IID) {
1874 default: {
1875 // Scalable vectors cannot be scalarized, so return Invalid.
1876 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1877 return isa<ScalableVectorType>(Ty);
1878 }))
1879 return InstructionCost::getInvalid();
1880
1881 // Assume that we need to scalarize this intrinsic.
1882 InstructionCost ScalarizationCost =
1883 SkipScalarizationCost ? ScalarizationCostPassed : 0;
1884 unsigned ScalarCalls = 1;
1885 Type *ScalarRetTy = RetTy;
1886 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1887 if (!SkipScalarizationCost)
1888 ScalarizationCost = getScalarizationOverhead(
1889 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
1890 ScalarCalls = std::max(ScalarCalls,
1891 cast<FixedVectorType>(RetVTy)->getNumElements());
1892 ScalarRetTy = RetTy->getScalarType();
1893 }
1894 SmallVector<Type *, 4> ScalarTys;
1895 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1896 Type *Ty = Tys[i];
1897 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1898 if (!SkipScalarizationCost)
1899 ScalarizationCost += getScalarizationOverhead(
1900 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
1901 ScalarCalls = std::max(ScalarCalls,
1902 cast<FixedVectorType>(VTy)->getNumElements());
1903 Ty = Ty->getScalarType();
1904 }
1905 ScalarTys.push_back(Ty);
1906 }
1907 if (ScalarCalls == 1)
1908 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
1909
1910 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
1911 InstructionCost ScalarCost =
1912 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
1913
1914 return ScalarCalls * ScalarCost + ScalarizationCost;
1915 }
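// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// The default case above models full scalarization. For a hypothetical
// intrinsic returning <4 x float> with one <4 x float> operand:
//   ScalarCalls       = 4
//   ScalarizationCost = 4 inserts (result) + 4 extracts (operand)
//   total             = 4 * ScalarCost + ScalarizationCost,
// where ScalarCost is the recursively computed cost of the scalar form.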
1916 // Look for intrinsics that can be lowered directly or turned into a scalar
1917 // intrinsic call.
1918 case Intrinsic::sqrt:
1919 ISD = ISD::FSQRT;
1920 break;
1921 case Intrinsic::sin:
1922 ISD = ISD::FSIN;
1923 break;
1924 case Intrinsic::cos:
1925 ISD = ISD::FCOS;
1926 break;
1927 case Intrinsic::exp:
1928 ISD = ISD::FEXP;
1929 break;
1930 case Intrinsic::exp2:
1931 ISD = ISD::FEXP2;
1932 break;
1933 case Intrinsic::exp10:
1934 ISD = ISD::FEXP10;
1935 break;
1936 case Intrinsic::log:
1937 ISD = ISD::FLOG;
1938 break;
1939 case Intrinsic::log10:
1940 ISD = ISD::FLOG10;
1941 break;
1942 case Intrinsic::log2:
1943 ISD = ISD::FLOG2;
1944 break;
1945 case Intrinsic::fabs:
1946 ISD = ISD::FABS;
1947 break;
1948 case Intrinsic::canonicalize:
1949 ISD = ISD::FCANONICALIZE;
1950 break;
1951 case Intrinsic::minnum:
1952 ISD = ISD::FMINNUM;
1953 break;
1954 case Intrinsic::maxnum:
1955 ISD = ISD::FMAXNUM;
1956 break;
1957 case Intrinsic::minimum:
1958 ISD = ISD::FMINIMUM;
1959 break;
1960 case Intrinsic::maximum:
1961 ISD = ISD::FMAXIMUM;
1962 break;
1963 case Intrinsic::copysign:
1964 ISD = ISD::FCOPYSIGN;
1965 break;
1966 case Intrinsic::floor:
1967 ISD = ISD::FFLOOR;
1968 break;
1969 case Intrinsic::ceil:
1970 ISD = ISD::FCEIL;
1971 break;
1972 case Intrinsic::trunc:
1973 ISD = ISD::FTRUNC;
1974 break;
1975 case Intrinsic::nearbyint:
1976 ISD = ISD::FNEARBYINT;
1977 break;
1978 case Intrinsic::rint:
1979 ISD = ISD::FRINT;
1980 break;
1981 case Intrinsic::lrint:
1982 ISD = ISD::LRINT;
1983 break;
1984 case Intrinsic::llrint:
1985 ISD = ISD::LLRINT;
1986 break;
1987 case Intrinsic::round:
1988 ISD = ISD::FROUND;
1989 break;
1990 case Intrinsic::roundeven:
1991 ISD = ISD::FROUNDEVEN;
1992 break;
1993 case Intrinsic::pow:
1994 ISD = ISD::FPOW;
1995 break;
1996 case Intrinsic::fma:
1997 ISD = ISD::FMA;
1998 break;
1999 case Intrinsic::fmuladd:
2000 ISD = ISD::FMA;
2001 break;
2002 case Intrinsic::experimental_constrained_fmuladd:
2003 ISD = ISD::STRICT_FMA;
2004 break;
2005 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2006 case Intrinsic::lifetime_start:
2007 case Intrinsic::lifetime_end:
2008 case Intrinsic::sideeffect:
2009 case Intrinsic::pseudoprobe:
2010 case Intrinsic::arithmetic_fence:
2011 return 0;
2012 case Intrinsic::masked_store: {
2013 Type *Ty = Tys[0];
2014 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2015 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
2016 CostKind);
2017 }
2018 case Intrinsic::masked_load: {
2019 Type *Ty = RetTy;
2020 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2021 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
2022 CostKind);
2023 }
2024 case Intrinsic::vector_reduce_add:
2025 case Intrinsic::vector_reduce_mul:
2026 case Intrinsic::vector_reduce_and:
2027 case Intrinsic::vector_reduce_or:
2028 case Intrinsic::vector_reduce_xor:
2029 return thisT()->getArithmeticReductionCost(
2030 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2031 CostKind);
2032 case Intrinsic::vector_reduce_fadd:
2033 case Intrinsic::vector_reduce_fmul:
2034 return thisT()->getArithmeticReductionCost(
2035 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2036 case Intrinsic::vector_reduce_smax:
2037 case Intrinsic::vector_reduce_smin:
2038 case Intrinsic::vector_reduce_umax:
2039 case Intrinsic::vector_reduce_umin:
2040 case Intrinsic::vector_reduce_fmax:
2041 case Intrinsic::vector_reduce_fmin:
2042 case Intrinsic::vector_reduce_fmaximum:
2043 case Intrinsic::vector_reduce_fminimum:
2044 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2045 VecOpTy, ICA.getFlags(), CostKind);
2046 case Intrinsic::abs: {
2047 // abs(X) = select(icmp(X,0),X,sub(0,X))
2048 Type *CondTy = RetTy->getWithNewBitWidth(1);
2049 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2050 InstructionCost Cost = 0;
2051 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2052 Pred, CostKind);
2053 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2054 Pred, CostKind);
2055 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2056 Cost += thisT()->getArithmeticInstrCost(
2057 BinaryOperator::Sub, RetTy, CostKind, {TTI::OK_UniformConstantValue, TTI::OP_None});
2058 return Cost;
2059 }
2060 case Intrinsic::smax:
2061 case Intrinsic::smin:
2062 case Intrinsic::umax:
2063 case Intrinsic::umin: {
2064 // minmax(X,Y) = select(icmp(X,Y),X,Y)
2065 Type *CondTy = RetTy->getWithNewBitWidth(1);
2066 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2067 CmpInst::Predicate Pred =
2068 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2069 InstructionCost Cost = 0;
2070 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2071 Pred, CostKind);
2072 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2073 Pred, CostKind);
2074 return Cost;
2075 }
2076 case Intrinsic::sadd_sat:
2077 case Intrinsic::ssub_sat: {
2078 Type *CondTy = RetTy->getWithNewBitWidth(1);
2079
2080 Type *OpTy = StructType::create({RetTy, CondTy});
2081 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2082 ? Intrinsic::sadd_with_overflow
2083 : Intrinsic::ssub_with_overflow;
2084 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2085
2086 // SatMax -> Overflow && SumDiff < 0
2087 // SatMin -> Overflow && SumDiff >= 0
2088 InstructionCost Cost = 0;
2089 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2090 nullptr, ScalarizationCostPassed);
2091 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2092 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2093 Pred, CostKind);
2094 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2095 CondTy, Pred, CostKind);
2096 return Cost;
2097 }
2098 case Intrinsic::uadd_sat:
2099 case Intrinsic::usub_sat: {
2100 Type *CondTy = RetTy->getWithNewBitWidth(1);
2101
2102 Type *OpTy = StructType::create({RetTy, CondTy});
2103 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2104 ? Intrinsic::uadd_with_overflow
2105 : Intrinsic::usub_with_overflow;
2106
2107 InstructionCost Cost = 0;
2108 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2109 nullptr, ScalarizationCostPassed);
2110 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2111 Cost +=
2112 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2114 return Cost;
2115 }
2116 case Intrinsic::smul_fix:
2117 case Intrinsic::umul_fix: {
2118 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2119 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2120
2121 unsigned ExtOp =
2122 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2123 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2124
2125 InstructionCost Cost = 0;
2126 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2127 Cost +=
2128 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2129 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2130 CCH, CostKind);
2131 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
2132 CostKind,
2135 Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
2138 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2139 return Cost;
2140 }
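// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// The fixed-point multiply above is priced as its generic expansion on a
// doubled-width type: for i32 smul.fix that is roughly
//   2 x sext to i64 + 1 x i64 mul + 2 x trunc to i32
//   + an lshr, shl and or on the original width to stitch the halves,
// i.e. about eight pieces before target-specific per-operation costs apply.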
2141 case Intrinsic::sadd_with_overflow:
2142 case Intrinsic::ssub_with_overflow: {
2143 Type *SumTy = RetTy->getContainedType(0);
2144 Type *OverflowTy = RetTy->getContainedType(1);
2145 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2146 ? BinaryOperator::Add
2147 : BinaryOperator::Sub;
2148
2149 // Add:
2150 // Overflow -> (Result < LHS) ^ (RHS < 0)
2151 // Sub:
2152 // Overflow -> (Result < LHS) ^ (RHS > 0)
2153 InstructionCost Cost = 0;
2154 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2155 Cost += 2 * thisT()->getCmpSelInstrCost(
2156 Instruction::ICmp, SumTy, OverflowTy,
2157 CmpInst::ICMP_SGT, CostKind);
2158 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2159 CostKind);
2160 return Cost;
2161 }
2162 case Intrinsic::uadd_with_overflow:
2163 case Intrinsic::usub_with_overflow: {
2164 Type *SumTy = RetTy->getContainedType(0);
2165 Type *OverflowTy = RetTy->getContainedType(1);
2166 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2167 ? BinaryOperator::Add
2168 : BinaryOperator::Sub;
2169 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2170 ? CmpInst::ICMP_ULT
2171 : CmpInst::ICMP_UGT;
2172
2173 InstructionCost Cost = 0;
2174 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2175 Cost +=
2176 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
2177 Pred, CostKind);
2178 return Cost;
2179 }
2180 case Intrinsic::smul_with_overflow:
2181 case Intrinsic::umul_with_overflow: {
2182 Type *MulTy = RetTy->getContainedType(0);
2183 Type *OverflowTy = RetTy->getContainedType(1);
2184 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2185 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2186 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2187
2188 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2189 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2190
2191 InstructionCost Cost = 0;
2192 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2193 Cost +=
2194 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2195 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2196 CCH, CostKind);
2197 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
2198 CostKind,
2201
2202 if (IsSigned)
2203 Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
2204 CostKind,
2207
2208 Cost += thisT()->getCmpSelInstrCost(
2209 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2210 return Cost;
2211 }
2212 case Intrinsic::fptosi_sat:
2213 case Intrinsic::fptoui_sat: {
2214 if (Tys.empty())
2215 break;
2216 Type *FromTy = Tys[0];
2217 bool IsSigned = IID == Intrinsic::fptosi_sat;
2218
2219 InstructionCost Cost = 0;
2220 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2221 {FromTy, FromTy});
2222 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2223 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2224 {FromTy, FromTy});
2225 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2226 Cost += thisT()->getCastInstrCost(
2227 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2228 TTI::CastContextHint::None, CostKind);
2229 if (IsSigned) {
2230 Type *CondTy = RetTy->getWithNewBitWidth(1);
2231 Cost += thisT()->getCmpSelInstrCost(
2232 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2233 Cost += thisT()->getCmpSelInstrCost(
2234 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2235 }
2236 return Cost;
2237 }
2238 case Intrinsic::ctpop:
2239 ISD = ISD::CTPOP;
2240 // In case of legalization use TCC_Expensive. This is cheaper than a
2241 // library call but still not a cheap instruction.
2242 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2243 break;
2244 case Intrinsic::ctlz:
2245 ISD = ISD::CTLZ;
2246 break;
2247 case Intrinsic::cttz:
2248 ISD = ISD::CTTZ;
2249 break;
2250 case Intrinsic::bswap:
2251 ISD = ISD::BSWAP;
2252 break;
2253 case Intrinsic::bitreverse:
2254 ISD = ISD::BITREVERSE;
2255 break;
2256 }
2257
2258 const TargetLoweringBase *TLI = getTLI();
2259 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
2260
2261 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2262 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2263 TLI->isFAbsFree(LT.second)) {
2264 return 0;
2265 }
2266
2267 // The operation is legal. Assume it costs 1.
2268 // If the type is split to multiple registers, assume that there is some
2269 // overhead to this.
2270 // TODO: Once we have extract/insert subvector cost we need to use them.
2271 if (LT.first > 1)
2272 return (LT.first * 2);
2273 else
2274 return (LT.first * 1);
2275 } else if (!TLI->isOperationExpand(ISD, LT.second)) {
2276 // If the operation is custom lowered then assume
2277 // that the code is twice as expensive.
2278 return (LT.first * 2);
2279 }
2280
2281 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2282 // point mul followed by an add.
2283 if (IID == Intrinsic::fmuladd)
2284 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2285 CostKind) +
2286 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2287 CostKind);
2288 if (IID == Intrinsic::experimental_constrained_fmuladd) {
2289 IntrinsicCostAttributes FMulAttrs(
2290 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2291 IntrinsicCostAttributes FAddAttrs(
2292 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2293 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2294 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2295 }
2296
2297 // Else, assume that we need to scalarize this intrinsic. For math builtins
2298 // this will emit a costly libcall, adding call overhead and spills. Make it
2299 // very expensive.
2300 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2301 // Scalable vectors cannot be scalarized, so return Invalid.
2302 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2303 return isa<ScalableVectorType>(Ty);
2304 }))
2305 return InstructionCost::getInvalid();
2306
2307 InstructionCost ScalarizationCost =
2308 SkipScalarizationCost
2309 ? ScalarizationCostPassed
2310 : getScalarizationOverhead(RetVTy, /*Insert*/ true,
2311 /*Extract*/ false, CostKind);
2312
2313 unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
2314 SmallVector<Type *, 4> ScalarTys;
2315 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2316 Type *Ty = Tys[i];
2317 if (Ty->isVectorTy())
2318 Ty = Ty->getScalarType();
2319 ScalarTys.push_back(Ty);
2320 }
2321 IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
2322 InstructionCost ScalarCost =
2323 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2324 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2325 if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
2326 if (!ICA.skipScalarizationCost())
2327 ScalarizationCost += getScalarizationOverhead(
2328 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2329 ScalarCalls = std::max(ScalarCalls,
2330 cast<FixedVectorType>(VTy)->getNumElements());
2331 }
2332 }
2333 return ScalarCalls * ScalarCost + ScalarizationCost;
2334 }
2335
2336 // This is going to be turned into a library call, make it expensive.
2337 return SingleCallCost;
2338 }
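// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// Summary of the tiers used by getTypeBasedIntrinsicInstrCost above: if the
// mapped ISD node is legal (or promoted) on the legalized type, the cost is
// LT.first, doubled when the value is split across registers; custom
// lowering is charged 2 * LT.first; fmuladd without FMA support decomposes
// into fmul + fadd; anything else is scalarized if it is a fixed-width
// vector, or treated as a library call (SingleCallCost, 10 by default; 1 for
// code size) for scalars.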
2339
2340 /// Compute a cost of the given call instruction.
2341 ///
2342 /// Compute the cost of calling function F with return type RetTy and
2343 /// argument types Tys. F might be nullptr, in this case the cost of an
2344 /// arbitrary call with the specified signature will be returned.
2345 /// This is used, for instance, when we estimate call of a vector
2346 /// counterpart of the given function.
2347 /// \param F Called function, might be nullptr.
2348 /// \param RetTy Return value types.
2349 /// \param Tys Argument types.
2350 /// \returns The cost of Call instruction.
2351 InstructionCost getCallInstrCost(Function *F, Type *RetTy,
2352 ArrayRef<Type *> Tys,
2353 TTI::TargetCostKind CostKind) {
2354 return 10;
2355 }
2356
2357 unsigned getNumberOfParts(Type *Tp) {
2358 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
2359 return LT.first.isValid() ? *LT.first.getValue() : 0;
2360 }
2361
2362 InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
2363 const SCEV *) {
2364 return 0;
2365 }
2366
2367 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
2368 /// We're assuming that reduction operations are performed the following way:
2369 ///
2369 ///
2370 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
2371 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
2372 /// \----------------v-------------/ \----------v------------/
2373 /// n/2 elements n/2 elements
2374 /// %red1 = op <n x t> %val, <n x t> val1
2375 /// After this operation we have a vector %red1 where only the first n/2
2376 /// elements are meaningful, the second n/2 elements are undefined and can be
2377 /// dropped. All other operations are actually working with the vector of
2378 /// length n/2, not n, though the real vector length is still n.
2379 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
2380 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
2381 /// \----------------v-------------/ \----------v------------/
2382 /// n/4 elements 3*n/4 elements
2383 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
2384 /// length n/2, the resulting vector has length n/4 etc.
2385 ///
2386 /// The cost model should take into account that the actual length of the
2387 /// vector is reduced on each iteration.
2388 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
2389 TTI::TargetCostKind CostKind) {
2390 // Targets must implement a default value for the scalable case, since
2391 // we don't know how many lanes the vector has.
2392 if (isa<ScalableVectorType>(Ty))
2393 return InstructionCost::getInvalid();
2394
2395 Type *ScalarTy = Ty->getElementType();
2396 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2397 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
2398 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
2399 NumVecElts >= 2) {
2400 // Or reduction for i1 is represented as:
2401 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2402 // %res = cmp ne iReduxWidth %val, 0
2403 // And reduction for i1 is represented as:
2404 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2405 // %res = cmp eq iReduxWidth %val, -1 (all bits set)
2406 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
2407 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
2408 TTI::CastContextHint::None, CostKind) +
2409 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
2410 CmpInst::makeCmpResultType(ValTy),
2411 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2412 }
2413 unsigned NumReduxLevels = Log2_32(NumVecElts);
2414 InstructionCost ArithCost = 0;
2415 InstructionCost ShuffleCost = 0;
2416 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2417 unsigned LongVectorCount = 0;
2418 unsigned MVTLen =
2419 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2420 while (NumVecElts > MVTLen) {
2421 NumVecElts /= 2;
2422 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2423 ShuffleCost +=
2424 thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
2425 CostKind, NumVecElts, SubTy);
2426 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
2427 Ty = SubTy;
2428 ++LongVectorCount;
2429 }
2430
2431 NumReduxLevels -= LongVectorCount;
2432
2433 // The minimal length of the vector is limited by the real length of vector
2434 // operations performed on the current platform. That's why several final
2435 // reduction operations are performed on the vectors with the same
2436 // architecture-dependent length.
2437
2438 // By default reductions need one shuffle per reduction level.
2439 ShuffleCost +=
2440 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
2441 std::nullopt, CostKind, 0, Ty);
2442 ArithCost +=
2443 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
2444 return ShuffleCost + ArithCost +
2445 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2446 CostKind, 0, nullptr, nullptr);
2447 }
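// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// Worked example of the tree reduction above: an add reduction of <8 x i32>
// on a hypothetical target whose widest legal vector is <4 x i32> costs one
// extract-subvector shuffle plus one <4 x i32> add to halve down to the
// legal width, then log2(4) = 2 further levels of (permute shuffle + add) on
// <4 x i32>, plus a final extractelement of lane 0 -- i.e. 3 shuffles,
// 3 adds and 1 extract at default per-operation costs.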
2448
2449 /// Try to calculate the cost of performing strict (in-order) reductions,
2450 /// which involves doing a sequence of floating point additions in lane
2451 /// order, starting with an initial value. For example, consider a scalar
2452 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
2453 ///
2454 /// Vector = <float %v0, float %v1, float %v2, float %v3>
2455 ///
2456 /// %add1 = %InitVal + %v0
2457 /// %add2 = %add1 + %v1
2458 /// %add3 = %add2 + %v2
2459 /// %add4 = %add3 + %v3
2460 ///
2461 /// As a simple estimate we can say the cost of such a reduction is 4 times
2462 /// the cost of a scalar FP addition. We can only estimate the costs for
2463 /// fixed-width vectors here because for scalable vectors we do not know the
2464 /// runtime number of operations.
2465 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
2466 TTI::TargetCostKind CostKind) {
2467 // Targets must implement a default value for the scalable case, since
2468 // we don't know how many lanes the vector has.
2469 if (isa<ScalableVectorType>(Ty))
2470 return InstructionCost::getInvalid();
2471
2472 auto *VTy = cast<FixedVectorType>(Ty);
2473 InstructionCost ExtractCost = getScalarizationOverhead(
2474 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
2475 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
2476 Opcode, VTy->getElementType(), CostKind);
2477 ArithCost *= VTy->getNumElements();
2478
2479 return ExtractCost + ArithCost;
2480 }
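// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// Example of the in-order estimate above: a strict fadd reduction of
// <4 x float> is charged the extraction of all four lanes plus four scalar
// fadds (one per lane), since the sequential chain cannot be reassociated
// into a shuffle-based tree.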
2481
2482 InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2483 std::optional<FastMathFlags> FMF,
2484 TTI::TargetCostKind CostKind) {
2485 assert(Ty && "Unknown reduction vector type");
2486 if (TTI::requiresOrderedReduction(FMF))
2487 return getOrderedReductionCost(Opcode, Ty, CostKind);
2488 return getTreeReductionCost(Opcode, Ty, CostKind);
2489 }
2490
2491 /// Try to calculate op costs for min/max reduction operations.
2492 /// \param CondTy Conditional type for the Select instruction.
2493 InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
2494 FastMathFlags FMF,
2495 TTI::TargetCostKind CostKind) {
2496 // Targets must implement a default value for the scalable case, since
2497 // we don't know how many lanes the vector has.
2498 if (isa<ScalableVectorType>(Ty))
2499 return InstructionCost::getInvalid();
2500
2501 Type *ScalarTy = Ty->getElementType();
2502 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2503 unsigned NumReduxLevels = Log2_32(NumVecElts);
2504 InstructionCost MinMaxCost = 0;
2505 InstructionCost ShuffleCost = 0;
2506 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2507 unsigned LongVectorCount = 0;
2508 unsigned MVTLen =
2509 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2510 while (NumVecElts > MVTLen) {
2511 NumVecElts /= 2;
2512 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2513
2514 ShuffleCost +=
2515 thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
2516 CostKind, NumVecElts, SubTy);
2517
2518 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
2519 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
2520 Ty = SubTy;
2521 ++LongVectorCount;
2522 }
2523
2524 NumReduxLevels -= LongVectorCount;
2525
2526 // The minimal length of the vector is limited by the real length of vector
2527 // operations performed on the current platform. That's why several final
2528 // reduction operations are performed on the vectors with the same
2529 // architecture-dependent length.
2530 ShuffleCost +=
2531 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
2532 std::nullopt, CostKind, 0, Ty);
2533 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
2534 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
2535 // The last min/max should be in vector registers and we counted it above.
2536 // So just need a single extractelement.
2537 return ShuffleCost + MinMaxCost +
2538 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2539 CostKind, 0, nullptr, nullptr);
2540 }
2541
2542 InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
2543 Type *ResTy, VectorType *Ty,
2544 FastMathFlags FMF,
2545 TTI::TargetCostKind CostKind) {
2546 // Without any native support, this is equivalent to the cost of
2547 // vecreduce.opcode(ext(Ty A)).
2548 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2549 InstructionCost RedCost =
2550 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
2551 InstructionCost ExtCost = thisT()->getCastInstrCost(
2552 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2553 TTI::CastContextHint::None, CostKind);
2554
2555 return RedCost + ExtCost;
2556 }
2557
2558 InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
2559 VectorType *Ty,
2560 TTI::TargetCostKind CostKind) {
2561 // Without any native support, this is equivalent to the cost of
2562 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
2563 // vecreduce.add(mul(A, B)).
2564 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2565 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
2566 Instruction::Add, ExtTy, std::nullopt, CostKind);
2567 InstructionCost ExtCost = thisT()->getCastInstrCost(
2568 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2569 TTI::CastContextHint::None, CostKind);
2570
2571 InstructionCost MulCost =
2572 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2573
2574 return RedCost + MulCost + 2 * ExtCost;
2575 }
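// --- Illustrative note (editorial sketch, not part of the LLVM source) ---
// Example of the fallback above: a dot-product style
// vecreduce.add(mul(zext(<16 x i8> A), zext(<16 x i8> B))) to i32 is priced
// as one add reduction over the extended <16 x i32> type, one <16 x i32>
// mul, and two zext casts, when the target reports no native
// multiply-accumulate reduction support.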
2576
2577 InstructionCost getVectorSplitCost() { return 1; }
2578
2579 /// @}
2580};
2581
2582/// Concrete BasicTTIImpl that can be used if no further customization
2583/// is needed.
2584class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
2586
2587 friend class BasicTTIImplBase<BasicTTIImpl>;
2588
2589 const TargetSubtargetInfo *ST;
2590 const TargetLoweringBase *TLI;
2591
2592 const TargetSubtargetInfo *getST() const { return ST; }
2593 const TargetLoweringBase *getTLI() const { return TLI; }
2594
2595public:
2596 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
2597};
2598
2599} // end namespace llvm
2600
2601#endif // LLVM_CODEGEN_BASICTTIIMPL_H
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
return RetTy
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1308
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1179
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1439
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1108
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
an instruction to allocate memory on the stack
Definition: Instructions.h:59
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:210
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
Base class which can be used to help build a TTI implementation.
Definition: BasicTTIImpl.h:80
bool isTypeLegal(Type *Ty)
Definition: BasicTTIImpl.h:428
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:286
virtual unsigned getPrefetchDistance() const
Definition: BasicTTIImpl.h:722
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
Definition: BasicTTIImpl.h:405
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
Definition: BasicTTIImpl.h:582
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
Definition: BasicTTIImpl.h:555
unsigned getMaxInterleaveFactor(ElementCount VF)
Definition: BasicTTIImpl.h:889
unsigned getNumberOfParts(Type *Tp)
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
Definition: BasicTTIImpl.h:751
std::optional< unsigned > getVScaleForTuning() const
Definition: BasicTTIImpl.h:756
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTruncateFree(Type *Ty1, Type *Ty2)
Definition: BasicTTIImpl.h:418
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
Definition: BasicTTIImpl.h:662
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
Definition: BasicTTIImpl.h:891
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI)
Definition: BasicTTIImpl.h:669
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
Definition: BasicTTIImpl.h:742
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
bool isLegalICmpImmediate(int64_t imm)
Definition: BasicTTIImpl.h:335
bool isProfitableToHoist(Instruction *I)
Definition: BasicTTIImpl.h:422
virtual unsigned getMaxPrefetchIterationsAhead() const
Definition: BasicTTIImpl.h:734
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index)
std::optional< unsigned > getMaxVScale() const
Definition: BasicTTIImpl.h:755
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const
Definition: BasicTTIImpl.h:969
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
unsigned getRegUsageForType(Type *Ty)
Definition: BasicTTIImpl.h:433
bool shouldBuildRelLookupTables() const
Definition: BasicTTIImpl.h:509
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
Definition: BasicTTIImpl.h:576
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Definition: BasicTTIImpl.h:444
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:376
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr)
bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2)
Definition: BasicTTIImpl.h:388
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
Definition: BasicTTIImpl.h:684
bool shouldFoldTerminatingConditionAfterLSR() const
Definition: BasicTTIImpl.h:396
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Definition: BasicTTIImpl.h:726
bool hasBranchDivergence(const Function *F=nullptr)
Definition: BasicTTIImpl.h:280
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:382
unsigned getAssumedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:308
InstructionCost getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instructions unique non-constant operands.
Definition: BasicTTIImpl.h:808
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *, const SCEV *)
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instruction.
Definition: BasicTTIImpl.h:762
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
Definition: BasicTTIImpl.h:352
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
Definition: BasicTTIImpl.h:438
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty)
Definition: BasicTTIImpl.h:541
virtual std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:702
bool isAlwaysUniform(const Value *V)
Definition: BasicTTIImpl.h:284
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true)
Definition: BasicTTIImpl.h:674
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const
Definition: BasicTTIImpl.h:272
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const
Definition: BasicTTIImpl.h:356
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
Definition: BasicTTIImpl.h:792
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:708
virtual bool enableWritePrefetching() const
Definition: BasicTTIImpl.h:738
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Definition: BasicTTIImpl.h:322
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
Definition: BasicTTIImpl.h:654
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Definition: BasicTTIImpl.h:299
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Compute a cost of the given call instruction.
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
InstructionCost getFPOpCost(Type *Ty)
Definition: BasicTTIImpl.h:545
InstructionCost getVectorSplitCost()
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
Definition: BasicTTIImpl.h:855
bool haveFastSqrt(Type *Ty)
Definition: BasicTTIImpl.h:534
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:318
unsigned getInliningThresholdMultiplier() const
Definition: BasicTTIImpl.h:574
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind)
virtual ~BasicTTIImplBase()=default
bool isLegalAddScalableImmediate(int64_t Imm)
Definition: BasicTTIImpl.h:331
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
Definition: BasicTTIImpl.h:837
bool isVScaleKnownToBeAPowerOfTwo() const
Definition: BasicTTIImpl.h:757
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II)
Definition: BasicTTIImpl.h:678
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Definition: BasicTTIImpl.h:290
bool isLegalAddImmediate(int64_t imm)
Definition: BasicTTIImpl.h:327
unsigned getFlatAddressSpace()
Definition: BasicTTIImpl.h:294
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
virtual unsigned getCacheLineSize() const
Definition: BasicTTIImpl.h:718
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:304
bool isSourceOfDivergence(const Value *V)
Definition: BasicTTIImpl.h:282
int getInlinerVectorBonusPercent() const
Definition: BasicTTIImpl.h:580
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on argument types.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Definition: BasicTTIImpl.h:691
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0)
Definition: BasicTTIImpl.h:339
bool isSingleThreaded() const
Definition: BasicTTIImpl.h:312
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
Definition: BasicTTIImpl.h:263
unsigned adjustInliningThreshold(const CallBase *CB)
Definition: BasicTTIImpl.h:575
bool isProfitableLSRChainElement(Instruction *I)
Definition: BasicTTIImpl.h:401
Concrete BasicTTIImpl that can be used if no further customization is needed.
size_type count() const
count - Returns the number of bits which are set.
Definition: BitVector.h:162
BitVector & set()
Definition: BitVector.h:351
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1362
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:1020
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:1018
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:1003
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
Definition: DataLayout.h:484
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:420
constexpr bool isVector() const
One or more elements.
Definition: TypeSize.h:311
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:307
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
bool isTargetIntrinsic() const
isTargetIntrinsic - Returns true if this function is an intrinsic and the intrinsic is specific to a ...
Definition: Function.cpp:883
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:339
The core instruction combiner logic.
Definition: InstCombiner.h:47
static InstructionCost getInvalid(CostType Val=0)
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:252
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
const SmallVectorImpl< Type * > & getArgTypes() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Return the minimum stride necessary to trigger software prefetching.
virtual bool enableWritePrefetching() const
virtual unsigned getMaxPrefetchIterationsAhead() const
Return the maximum prefetch distance in terms of loop iterations.
virtual unsigned getPrefetchDistance() const
Return the preferred prefetch distance in terms of instructions.
virtual std::optional< unsigned > getCacheAssociativity(unsigned Level) const
Return the cache associatvity for the given level of cache.
virtual std::optional< unsigned > getCacheLineSize(unsigned Level) const
Return the target cache line size in bytes at a given level.
Machine Value Type.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
size_type size() const
Definition: SmallPtrSet.h:94
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:513
Multiway switch.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool isProfitableToHoist(Instruction *I) const
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the prefered common base offset.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual bool isLegalAddScalableImmediate(int64_t) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
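The sketch below fills in an AddrMode describing Base + 4*Index + 16 and asks whether it is legal for an access of type AccessTy; the helper name and the concrete constants are illustrative assumptions.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
// Sketch: does the target support Base + 4*Index + 16 for this access type
// in address space 0?
static bool scaledAddressIsLegal(const llvm::TargetLowering &TLI,
                                 const llvm::DataLayout &DL,
                                 llvm::Type *AccessTy) {
  llvm::TargetLowering::AddrMode AM;
  AM.HasBaseReg = true; // a base register
  AM.Scale = 4;         // plus 4 * an index register
  AM.BaseOffs = 16;     // plus a constant offset of 16
  return TLI.isLegalAddressingMode(DL, AM, AccessTy, /*AddrSpace=*/0);
}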
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
virtual std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
If the specified predicate checks whether a generic pointer falls within a specified address space,...
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
virtual unsigned getAssumedAddrSpace(const Value *V) const
If the specified generic pointer could be assumed as a pointer to a specific address space,...
TargetOptions Options
ThreadModel::Model ThreadModel
ThreadModel - This flag specifies the type of threading model to assume for things like atomics.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine,...
const DataLayout & getDataLayout() const
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const
bool isProfitableLSRChainElement(Instruction *I) const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I) const
bool isLoweredToCall(const Function *F) const
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
static OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
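As a rough illustration of how a cost kind is passed to a query, the sketch below compares the throughput and code-size cost of a <4 x i32> add; TTI is assumed to be a TargetTransformInfo obtained from the pass manager and queryAddCosts is a hypothetical helper.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
static void queryAddCosts(const llvm::TargetTransformInfo &TTI,
                          llvm::LLVMContext &Ctx) {
  llvm::Type *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 4);
  llvm::InstructionCost Throughput = TTI.getArithmeticInstrCost(
      llvm::Instruction::Add, VecTy,
      llvm::TargetTransformInfo::TCK_RecipThroughput);
  llvm::InstructionCost Size = TTI.getArithmeticInstrCost(
      llvm::Instruction::Add, VecTy,
      llvm::TargetTransformInfo::TCK_CodeSize);
  (void)Throughput;
  (void)Size;
}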
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
@ TCC_Expensive
The cost of a 'div' instruction on x86.
@ TCC_Basic
The cost of a typical 'add' instruction.
MemIndexedMode
The type of load/store indexing.
@ MIM_PostInc
Post-incrementing.
@ MIM_PostDec
Post-decrementing.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector. Index indicates start offset.
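A small sketch of passing two of these shuffle kinds to the cost model, assuming a TargetTransformInfo reference TTI and an LLVMContext Ctx; the helper name is hypothetical.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
// Sketch: cost of broadcasting element 0 versus reversing an <8 x i16> vector.
static void queryShuffleCosts(const llvm::TargetTransformInfo &TTI,
                              llvm::LLVMContext &Ctx) {
  auto *VecTy = llvm::FixedVectorType::get(llvm::Type::getInt16Ty(Ctx), 8);
  llvm::InstructionCost Splat =
      TTI.getShuffleCost(llvm::TargetTransformInfo::SK_Broadcast, VecTy);
  llvm::InstructionCost Rev =
      TTI.getShuffleCost(llvm::TargetTransformInfo::SK_Reverse, VecTy);
  (void)Splat;
  (void)Rev;
}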
CastContextHint
Represents a hint about the context in which a cast is used.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
CacheLevel
The possible cache levels.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:361
bool isArch64Bit() const
Test whether the architecture is 64-bit.
Definition: Triple.cpp:1538
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Definition: Triple.h:542
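A brief sketch of the Triple queries referenced above; the triple string and the helper name are arbitrary examples.
#include "llvm/TargetParser/Triple.h"
static void inspectTriple() {
  llvm::Triple T("arm64-apple-macosx14.0");
  bool Is64   = T.isArch64Bit();             // true for arm64
  bool Darwin = T.isOSDarwin();              // true for macosx
  llvm::Triple::ArchType Arch = T.getArch(); // Triple::aarch64
  (void)Is64; (void)Darwin; (void)Arch;
}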
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:330
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
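The Type queries above are used throughout the cost model; the sketch below exercises them on a <4 x i32> vector. The helper name is illustrative.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
static void typeQueries(llvm::LLVMContext &Ctx) {
  llvm::Type *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 4); // <4 x i32>
  bool IsVec = VecTy->isVectorTy();                // true
  bool IsInt = VecTy->isIntOrIntVectorTy();        // true
  unsigned EltBits = VecTy->getScalarSizeInBits(); // 32
  llvm::Type *EltTy = VecTy->getScalarType();      // i32
  llvm::Type *Widened = VecTy->getWithNewBitWidth(64); // <4 x i64>
  (void)IsVec; (void)IsInt; (void)EltBits; (void)EltTy; (void)Widened;
}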
Value * getOperand(unsigned i) const
Definition: User.h:169
static bool isVPBinOp(Intrinsic::ID ID)
static std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static bool isVPIntrinsic(Intrinsic::ID)
static bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
Definition: DerivedTypes.h:507
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
Type * getElementType() const
Definition: DerivedTypes.h:436
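A short sketch of constructing vector types with an explicit ElementCount, covering both fixed and scalable vectors; the helper name is illustrative.
#include "llvm/IR/DerivedTypes.h"
static void buildVectorTypes(llvm::LLVMContext &Ctx) {
  llvm::Type *F32 = llvm::Type::getFloatTy(Ctx);
  auto *V4F32 =
      llvm::VectorType::get(F32, llvm::ElementCount::getFixed(4));    // <4 x float>
  auto *NxV4F32 =
      llvm::VectorType::get(F32, llvm::ElementCount::getScalable(4)); // <vscale x 4 x float>
  auto *V2F32 = llvm::VectorType::getHalfElementsVectorType(V4F32);   // <2 x float>
  (void)NxV4F32; (void)V2F32;
}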
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
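The sketch below shows how the scalable/fixed distinction surfaces in ElementCount and why mixed comparisons are conservative; the helper name is illustrative.
#include "llvm/Support/TypeSize.h"
static void elementCountQueries() {
  llvm::ElementCount Fixed8 = llvm::ElementCount::getFixed(8);
  llvm::ElementCount Scal4  = llvm::ElementCount::getScalable(4);
  bool S = Scal4.isScalable();             // true: scaled by vscale at run time
  unsigned Min = Scal4.getKnownMinValue(); // 4, the compile-time known minimum
  // isKnownLT only returns true when the relation holds for every vscale,
  // so a scalable 4 is not provably less than a fixed 8.
  bool Lt = llvm::ElementCount::isKnownLT(Scal4, Fixed8); // false
  (void)S; (void)Min; (void)Lt;
}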
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
Definition: APInt.cpp:2978
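A small worked example of the widening case, assuming the function is reached through the llvm::APIntOps namespace; the helper name is illustrative.
#include "llvm/ADT/APInt.h"
// Sketch: widen a 4-lane demanded-elements mask to 8 lanes by splatting
// each bit: 0b1010 -> 0b11001100.
static llvm::APInt widenDemandedElts() {
  llvm::APInt Demanded(4, 0b1010);
  return llvm::APIntOps::ScaleBitMask(Demanded, /*NewBitWidth=*/8);
}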
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:714
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:483
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1052
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1056
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition: ISDOpcodes.h:500
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:971
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:736
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:984
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:493
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1472
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
Definition: LoopUtils.cpp:950
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:417
AddressSpace
Definition: NVPTXBaseInfo.h:21
unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
Definition: LoopUtils.cpp:921
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
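A one-liner sketch of the range-based wrapper; the container and helper name are arbitrary examples.
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
// Sketch: llvm::any_of over a whole container, instead of std::any_of with
// explicit begin()/end() iterators.
static bool hasNegative(const llvm::SmallVectorImpl<int> &Values) {
  return llvm::any_of(Values, [](int V) { return V < 0; });
}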
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
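A worked sketch of the MathExtras helpers referenced above (divideCeil, Log2_32, isPowerOf2_32) on concrete values; the helper name is illustrative.
#include "llvm/Support/MathExtras.h"
static void mathExtrasExamples() {
  uint64_t Groups = llvm::divideCeil(10, 3); // 4: ceil(10 / 3)
  unsigned Shift  = llvm::Log2_32(32);       // 5: floor(log2(32))
  bool Pow2       = llvm::isPowerOf2_32(48); // false: 48 = 16 * 3
  (void)Groups; (void)Shift; (void)Pow2;
}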
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
InstructionCost Cost
cl::opt< unsigned > PartialUnrollingThreshold
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:628
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
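The sketch below maps an IR type to an EVT and back to a simple MVT, and shows an extended (non-simple) integer EVT; the helper name is illustrative.
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
static void evtExamples(llvm::LLVMContext &Ctx) {
  llvm::EVT I48 = llvm::EVT::getIntegerVT(Ctx, 48); // extended: no MVT::i48
  llvm::EVT I32 = llvm::EVT::getEVT(llvm::Type::getInt32Ty(Ctx));
  if (I32.isSimple()) {
    llvm::MVT Simple = I32.getSimpleVT();           // MVT::i32
    (void)Simple;
  }
  (void)I48;
}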
Attributes of a target dependent hardware loop.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
bool AllowPeeling
Allow peeling off loop iterations.
bool AllowLoopNestsPeeling
Allow peeling off loop iterations for loop nests.
bool PeelProfiledIterations
Allow peeling based on profile.
unsigned PeelCount
A forced peeling factor (the number of bodies of the original loop that should be peeled off before t...
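The sketch below fills these fields the way a target's peeling hook might; the concrete values and the helper name are purely illustrative assumptions.
#include "llvm/Analysis/TargetTransformInfo.h"
static void tunePeeling(llvm::TargetTransformInfo::PeelingPreferences &PP) {
  PP.AllowPeeling = true;           // peeling iterations off the front is OK
  PP.AllowLoopNestsPeeling = false; // but not for whole loop nests
  PP.PeelProfiledIterations = true; // profile data may drive the peel count
  PP.PeelCount = 0;                 // 0 = no forced peeling factor
}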
Parameters that control the generic loop unrolling transformation.
bool UpperBound
Allow using trip count upper bound to unroll loops.
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
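A similar sketch for the unrolling knobs listed above, as a target's getUnrollingPreferences() hook might set them; the values and the helper name are illustrative only.
#include "llvm/Analysis/TargetTransformInfo.h"
static void tuneUnrolling(llvm::TargetTransformInfo::UnrollingPreferences &UP) {
  UP.Partial = true;              // allow partial unrolling
  UP.Runtime = true;              // allow runtime trip-count unrolling
  UP.UpperBound = false;          // do not unroll based on the trip-count upper bound
  UP.PartialThreshold = 200;      // size budget for partial/runtime unrolling
  UP.OptSizeThreshold = 0;        // effectively disable unrolling at -Os
  UP.PartialOptSizeThreshold = 0; // ...and partial unrolling at -Os
}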