1//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMTargetTransformInfo.h"
10#include "ARMSubtarget.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "llvm/ADT/APInt.h"
19#include "llvm/IR/BasicBlock.h"
20#include "llvm/IR/DataLayout.h"
22#include "llvm/IR/Instruction.h"
25#include "llvm/IR/Intrinsics.h"
26#include "llvm/IR/IntrinsicsARM.h"
28#include "llvm/IR/Type.h"
37#include <algorithm>
38#include <cassert>
39#include <cstdint>
40#include <optional>
41#include <utility>
42
43using namespace llvm;
44
45#define DEBUG_TYPE "armtti"
46
48 "enable-arm-maskedldst", cl::Hidden, cl::init(true),
49 cl::desc("Enable the generation of masked loads and stores"));
50
52 "disable-arm-loloops", cl::Hidden, cl::init(false),
53 cl::desc("Disable the generation of low-overhead loops"));
54
55static cl::opt<bool>
56 AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
57 cl::desc("Enable the generation of WLS loops"));
58
59extern cl::opt<TailPredication::Mode> EnableTailPredication;
60
61extern cl::opt<bool> EnableMaskedGatherScatters;
62
63extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
64
65/// Convert a vector load intrinsic into a simple llvm load instruction.
66/// This is beneficial when the underlying object being addressed comes
67/// from a constant, since we get constant-folding for free.
68static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
69 InstCombiner::BuilderTy &Builder) {
70 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
71
72 if (!IntrAlign)
73 return nullptr;
74
75 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
76 ? MemAlign
77 : IntrAlign->getLimitedValue();
78
79 if (!isPowerOf2_32(Alignment))
80 return nullptr;
81
82 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
83 PointerType::get(II.getType(), 0));
84 return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
85}
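// For illustration, the rewrite this enables looks roughly like (IR sketch,
// assuming a sufficiently aligned pointer %p):
//   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0(ptr %p, i32 4)
// becomes
//   %v = load <4 x i32>, ptr %p, align 4
// which the rest of InstCombine can constant-fold when %p addresses a constant.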
86
87bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
88 const Function *Callee) const {
89 const TargetMachine &TM = getTLI()->getTargetMachine();
90 const FeatureBitset &CallerBits =
91 TM.getSubtargetImpl(*Caller)->getFeatureBits();
92 const FeatureBitset &CalleeBits =
93 TM.getSubtargetImpl(*Callee)->getFeatureBits();
94
95 // To inline a callee, all features not in the allowed list must match exactly.
96 bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
97 (CalleeBits & ~InlineFeaturesAllowed);
98 // For features in the allowed list, the callee's features must be a subset of
99 // the caller's.
100 bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
101 (CalleeBits & InlineFeaturesAllowed);
102 return MatchExact && MatchSubset;
103}
104
105TTI::AddressingModeKind
106ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
107 ScalarEvolution *SE) const {
108 if (ST->hasMVEIntegerOps())
109 return TTI::AMK_PostIndexed;
110
111 if (L->getHeader()->getParent()->hasOptSize())
112 return TTI::AMK_None;
113
114 if (ST->isMClass() && ST->isThumb2() &&
115 L->getNumBlocks() == 1)
116 return TTI::AMK_PreIndexed;
117
118 return TTI::AMK_None;
119}
120
121std::optional<Instruction *>
122ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
123 using namespace PatternMatch;
124 Intrinsic::ID IID = II.getIntrinsicID();
125 switch (IID) {
126 default:
127 break;
128 case Intrinsic::arm_neon_vld1: {
129 Align MemAlign =
130 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
131 &IC.getAssumptionCache(), &IC.getDominatorTree());
132 if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
133 return IC.replaceInstUsesWith(II, V);
134 }
135 break;
136 }
137
138 case Intrinsic::arm_neon_vld2:
139 case Intrinsic::arm_neon_vld3:
140 case Intrinsic::arm_neon_vld4:
141 case Intrinsic::arm_neon_vld2lane:
142 case Intrinsic::arm_neon_vld3lane:
143 case Intrinsic::arm_neon_vld4lane:
144 case Intrinsic::arm_neon_vst1:
145 case Intrinsic::arm_neon_vst2:
146 case Intrinsic::arm_neon_vst3:
147 case Intrinsic::arm_neon_vst4:
148 case Intrinsic::arm_neon_vst2lane:
149 case Intrinsic::arm_neon_vst3lane:
150 case Intrinsic::arm_neon_vst4lane: {
151 Align MemAlign =
152 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
153 &IC.getAssumptionCache(), &IC.getDominatorTree());
154 unsigned AlignArg = II.arg_size() - 1;
155 Value *AlignArgOp = II.getArgOperand(AlignArg);
156 MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
157 if (Align && *Align < MemAlign) {
158 return IC.replaceOperand(
159 II, AlignArg,
160 ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
161 false));
162 }
163 break;
164 }
165
166 case Intrinsic::arm_mve_pred_i2v: {
167 Value *Arg = II.getArgOperand(0);
168 Value *ArgArg;
169 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
170 PatternMatch::m_Value(ArgArg))) &&
171 II.getType() == ArgArg->getType()) {
172 return IC.replaceInstUsesWith(II, ArgArg);
173 }
174 Constant *XorMask;
175 if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
176 PatternMatch::m_Value(ArgArg)),
177 PatternMatch::m_Constant(XorMask))) &&
178 II.getType() == ArgArg->getType()) {
179 if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
180 if (CI->getValue().trunc(16).isAllOnes()) {
181 auto TrueVector = IC.Builder.CreateVectorSplat(
182 cast<FixedVectorType>(II.getType())->getNumElements(),
183 IC.Builder.getTrue());
184 return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
185 }
186 }
187 }
188 KnownBits ScalarKnown(32);
189 if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
190 ScalarKnown, 0)) {
191 return &II;
192 }
193 break;
194 }
195 case Intrinsic::arm_mve_pred_v2i: {
196 Value *Arg = II.getArgOperand(0);
197 Value *ArgArg;
198 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
199 PatternMatch::m_Value(ArgArg)))) {
200 return IC.replaceInstUsesWith(II, ArgArg);
201 }
202 if (!II.getMetadata(LLVMContext::MD_range)) {
203 Type *IntTy32 = Type::getInt32Ty(II.getContext());
204 Metadata *M[] = {
205 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
206 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))};
207 II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
208 II.setMetadata(LLVMContext::MD_noundef,
209 MDNode::get(II.getContext(), std::nullopt));
210 return &II;
211 }
212 break;
213 }
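// A sketch of the round-trip folds above, in IR form (value names hypothetical):
//   %i = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %p)
//   %q = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %i)
// simplifies %q back to %p (and symmetrically for i2v-of-v2i), while the range
// and noundef metadata added above record that only the low 16 bits of the i32
// lane mask can ever be set.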
214 case Intrinsic::arm_mve_vadc:
215 case Intrinsic::arm_mve_vadc_predicated: {
216 unsigned CarryOp =
217 (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
218 assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
219 "Bad type for intrinsic!");
220
221 KnownBits CarryKnown(32);
222 if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
223 CarryKnown)) {
224 return &II;
225 }
226 break;
227 }
228 case Intrinsic::arm_mve_vmldava: {
229 Instruction *I = cast<Instruction>(&II);
230 if (I->hasOneUse()) {
231 auto *User = cast<Instruction>(*I->user_begin());
232 Value *OpZ;
233 if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
234 match(I->getOperand(3), m_Zero())) {
235 Value *OpX = I->getOperand(4);
236 Value *OpY = I->getOperand(5);
237 Type *OpTy = OpX->getType();
238
239 IC.Builder.SetInsertPoint(User);
240 Value *V =
241 IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
242 {I->getOperand(0), I->getOperand(1),
243 I->getOperand(2), OpZ, OpX, OpY});
244
245 IC.replaceInstUsesWith(*User, V);
246 return IC.eraseInstFromFunction(*User);
247 }
248 }
249 return std::nullopt;
250 }
251 }
252 return std::nullopt;
253}
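// The arm_mve_vmldava case above performs, schematically:
//   %acc = vmldava(..., 0, %x, %y)   ; accumulator operand is zero
//   %sum = add %acc, %z              ; single add user
// ==>
//   %sum = vmldava(..., %z, %x, %y)  ; the add is folded into the accumulator
// (operand positions as used in the code above; the IR spelling is only a sketch).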
254
255std::optional<Value *> ARMTTIImpl::simplifyDemandedVectorEltsIntrinsic(
256 InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
257 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
258 std::function<void(Instruction *, unsigned, APInt, APInt &)>
259 SimplifyAndSetOp) const {
260
261 // Compute the demanded bits for a narrowing MVE intrinsic. The TopOpc is the
262 // opcode specifying a Top/Bottom instruction, which can change between
263 // instructions.
264 auto SimplifyNarrowInstrTopBottom =[&](unsigned TopOpc) {
265 unsigned NumElts = cast<FixedVectorType>(II.getType())->getNumElements();
266 unsigned IsTop = cast<ConstantInt>(II.getOperand(TopOpc))->getZExtValue();
267
268 // Only the odd/even lanes of operand 0 will be demanded, depending
269 // on whether this is a top/bottom instruction.
270 APInt DemandedElts =
271 APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
272 : APInt::getHighBitsSet(2, 1));
273 SimplifyAndSetOp(&II, 0, OrigDemandedElts & DemandedElts, UndefElts);
274 // The other lanes will be defined from the inserted elements.
275 UndefElts &= APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
276 : APInt::getHighBitsSet(2, 1));
277 return std::nullopt;
278 };
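// Concretely, for a v8i16 result: IsTop == 1 writes the odd (top) lanes, so
// only the even lanes of operand 0 are demanded (splat of 0b01 -> 0x55 mask);
// IsTop == 0 writes the even lanes, so only the odd lanes of operand 0 are
// demanded (splat of 0b10 -> 0xAA mask).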
279
280 switch (II.getIntrinsicID()) {
281 default:
282 break;
283 case Intrinsic::arm_mve_vcvt_narrow:
284 SimplifyNarrowInstrTopBottom(2);
285 break;
286 case Intrinsic::arm_mve_vqmovn:
287 SimplifyNarrowInstrTopBottom(4);
288 break;
289 case Intrinsic::arm_mve_vshrn:
290 SimplifyNarrowInstrTopBottom(7);
291 break;
292 }
293
294 return std::nullopt;
295}
296
297InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
298 TTI::TargetCostKind CostKind) {
299 assert(Ty->isIntegerTy());
300
301 unsigned Bits = Ty->getPrimitiveSizeInBits();
302 if (Bits == 0 || Imm.getActiveBits() >= 64)
303 return 4;
304
305 int64_t SImmVal = Imm.getSExtValue();
306 uint64_t ZImmVal = Imm.getZExtValue();
307 if (!ST->isThumb()) {
308 if ((SImmVal >= 0 && SImmVal < 65536) ||
309 (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
310 (ARM_AM::getSOImmVal(~ZImmVal) != -1))
311 return 1;
312 return ST->hasV6T2Ops() ? 2 : 3;
313 }
314 if (ST->isThumb2()) {
315 if ((SImmVal >= 0 && SImmVal < 65536) ||
316 (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
317 (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
318 return 1;
319 return ST->hasV6T2Ops() ? 2 : 3;
320 }
321 // Thumb1: any i8 immediate costs 1.
322 if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
323 return 1;
324 if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
325 return 2;
326 // Load from constantpool.
327 return 3;
328}
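// A rough example of the buckets above: on Thumb2, 0xFF00FF00 is a valid
// modified immediate (cost 1), 0x12345678 is not and needs a MOVW/MOVT pair on
// v6T2+ (cost 2), and without v6T2 such a constant is loaded from a constant
// pool (cost 3).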
329
330// Constants smaller than 256 fit in the immediate field of
331// Thumb1 instructions, so we return a cost of zero for them and 1 otherwise.
332InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
333 const APInt &Imm, Type *Ty) {
334 if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
335 return 0;
336
337 return 1;
338}
339
340// Checks whether Inst is part of a min(max()) or max(min()) pattern
341// that will match to an SSAT instruction. Returns the instruction being
342// saturated, or null if no saturation pattern was found.
343static Value *isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
344 Value *LHS, *RHS;
345 ConstantInt *C;
346 SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
347
348 if (InstSPF == SPF_SMAX &&
349 PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
350 C->getValue() == Imm && Imm.isNegative() && Imm.isNegatedPowerOf2()) {
351
352 auto isSSatMin = [&](Value *MinInst) {
353 if (isa<SelectInst>(MinInst)) {
354 Value *MinLHS, *MinRHS;
355 ConstantInt *MinC;
356 SelectPatternFlavor MinSPF =
357 matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
358 if (MinSPF == SPF_SMIN &&
359 PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
360 MinC->getValue() == ((-Imm) - 1))
361 return true;
362 }
363 return false;
364 };
365
366 if (isSSatMin(Inst->getOperand(1)))
367 return cast<Instruction>(Inst->getOperand(1))->getOperand(1);
368 if (Inst->hasNUses(2) &&
369 (isSSatMin(*Inst->user_begin()) || isSSatMin(*(++Inst->user_begin()))))
370 return Inst->getOperand(1);
371 }
372 return nullptr;
373}
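// The shape being matched above is, for example (IR sketch with Imm == -128):
//   %lo = smax(%x, -128)   ; select-based smax against Imm
//   %r  = smin(%lo, 127)   ; smin against -Imm - 1
// (or with min and max the other way around), which CodeGen can turn into an
// ssat #8 on the saturated operand.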
374
375// Look for an FP saturation pattern, where the instruction can be simplified
376// to a fptosi.sat: max(min(fptosi)). The constant in this case is always free.
377static bool isFPSatMinMaxPattern(Instruction *Inst, const APInt &Imm) {
378 if (Imm.getBitWidth() != 64 ||
379 Imm != APInt::getHighBitsSet(64, 33)) // -2147483648
380 return false;
381 Value *FP = isSSATMinMaxPattern(Inst, Imm);
382 if (!FP && isa<ICmpInst>(Inst) && Inst->hasOneUse())
383 FP = isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm);
384 if (!FP)
385 return false;
386 return isa<FPToSIInst>(FP);
387}
388
389InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
390 const APInt &Imm, Type *Ty,
391 TTI::TargetCostKind CostKind,
392 Instruction *Inst) {
393 // Division by a constant can be turned into multiplication, but only if we
394 // know it's constant. So it's not so much that the immediate is cheap (it's
395 // not), but that the alternative is worse.
396 // FIXME: this is probably unneeded with GlobalISel.
397 if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
398 Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
399 Idx == 1)
400 return 0;
401
402 // Leave any gep offsets for the CodeGenPrepare, which will do a better job at
403 // splitting any large offsets.
404 if (Opcode == Instruction::GetElementPtr && Idx != 0)
405 return 0;
406
407 if (Opcode == Instruction::And) {
408 // UXTB/UXTH
409 if (Imm == 255 || Imm == 65535)
410 return 0;
411 // Conversion to BIC is free, and means we can use ~Imm instead.
412 return std::min(getIntImmCost(Imm, Ty, CostKind),
413 getIntImmCost(~Imm, Ty, CostKind));
414 }
415
416 if (Opcode == Instruction::Add)
417 // Conversion to SUB is free, and means we can use -Imm instead.
418 return std::min(getIntImmCost(Imm, Ty, CostKind),
419 getIntImmCost(-Imm, Ty, CostKind));
420
421 if (Opcode == Instruction::ICmp && Imm.isNegative() &&
422 Ty->getIntegerBitWidth() == 32) {
423 int64_t NegImm = -Imm.getSExtValue();
424 if (ST->isThumb2() && NegImm < 1<<12)
425 // icmp X, #-C -> cmn X, #C
426 return 0;
427 if (ST->isThumb() && NegImm < 1<<8)
428 // icmp X, #-C -> adds X, #C
429 return 0;
430 }
431
432 // xor a, -1 can always be folded to MVN
433 if (Opcode == Instruction::Xor && Imm.isAllOnes())
434 return 0;
435
436 // Ensure negative constants in min(max()) or max(min()) patterns that
437 // match to SSAT instructions don't get hoisted
438 if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
439 Ty->getIntegerBitWidth() <= 32) {
440 if (isSSATMinMaxPattern(Inst, Imm) ||
441 (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
442 isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
443 return 0;
444 }
445
446 if (Inst && ST->hasVFP2Base() && isFPSatMinMaxPattern(Inst, Imm))
447 return 0;
448
449 // We can convert <= -1 to < 0, which is generally quite cheap.
450 if (Inst && Opcode == Instruction::ICmp && Idx == 1 && Imm.isAllOnes()) {
451 ICmpInst::Predicate Pred = cast<ICmpInst>(Inst)->getPredicate();
452 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE)
453 return std::min(getIntImmCost(Imm, Ty, CostKind),
454 getIntImmCost(Imm + 1, Ty, CostKind));
455 }
456
457 return getIntImmCost(Imm, Ty, CostKind);
458}
459
460InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
461 TTI::TargetCostKind CostKind,
462 const Instruction *I) {
463 if (CostKind == TTI::TCK_RecipThroughput &&
464 (ST->hasNEON() || ST->hasMVEIntegerOps())) {
465 // FIXME: The vectorizer is highly sensitive to the cost of these
466 // instructions, which suggests that it may be using the costs incorrectly.
467 // But, for now, just make them free to avoid performance regressions for
468 // vector targets.
469 return 0;
470 }
471 return BaseT::getCFInstrCost(Opcode, CostKind, I);
472}
473
474InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
475 Type *Src,
476 TTI::CastContextHint CCH,
477 TTI::TargetCostKind CostKind,
478 const Instruction *I) {
479 int ISD = TLI->InstructionOpcodeToISD(Opcode);
480 assert(ISD && "Invalid opcode");
481
482 // TODO: Allow non-throughput costs that aren't binary.
483 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
484 if (CostKind != TTI::TCK_RecipThroughput)
485 return Cost == 0 ? 0 : 1;
486 return Cost;
487 };
488 auto IsLegalFPType = [this](EVT VT) {
489 EVT EltVT = VT.getScalarType();
490 return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
491 (EltVT == MVT::f64 && ST->hasFP64()) ||
492 (EltVT == MVT::f16 && ST->hasFullFP16());
493 };
494
495 EVT SrcTy = TLI->getValueType(DL, Src);
496 EVT DstTy = TLI->getValueType(DL, Dst);
497
498 if (!SrcTy.isSimple() || !DstTy.isSimple())
499 return AdjustCost(
500 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
501
502 // Extending masked loads/truncating masked stores are expensive because we
503 // currently don't split them. This means that we'll likely end up
504 // loading/storing each element individually (hence the high cost).
505 if ((ST->hasMVEIntegerOps() &&
506 (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
507 Opcode == Instruction::SExt)) ||
508 (ST->hasMVEFloatOps() &&
509 (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
510 IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
511 if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
512 return 2 * DstTy.getVectorNumElements() *
513 ST->getMVEVectorCostFactor(CostKind);
514
515 // The extend of other kinds of load is free
516 if (CCH == TTI::CastContextHint::Normal ||
517 CCH == TTI::CastContextHint::Masked) {
518 static const TypeConversionCostTblEntry LoadConversionTbl[] = {
519 {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
520 {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
521 {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
522 {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
523 {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
524 {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
525 {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
526 {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
527 {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
528 {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
529 {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
530 {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
531 };
532 if (const auto *Entry = ConvertCostTableLookup(
533 LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
534 return AdjustCost(Entry->Cost);
535
536 static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
537 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
538 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
539 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
540 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
541 {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
542 {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
543 // The following extend from a legal type to an illegal type, so need to
544 // split the load. This introduces an extra load operation, but the
545 // extend is still "free".
546 {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
547 {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
548 {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
549 {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
550 {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
551 {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
552 };
553 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
554 if (const auto *Entry =
555 ConvertCostTableLookup(MVELoadConversionTbl, ISD,
556 DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
557 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
558 }
559
560 static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
561 // FPExtends are similar but also require the VCVT instructions.
562 {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
563 {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
564 };
565 if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
566 if (const auto *Entry =
567 ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
568 DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
569 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
570 }
571
572 // The truncate of a store is free. This is the mirror of extends above.
573 static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
574 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
575 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
576 {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
577 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
578 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
579 {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
580 {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
581 };
582 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
583 if (const auto *Entry =
584 ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
585 SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
586 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
587 }
588
589 static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
590 {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
591 {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
592 };
593 if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
594 if (const auto *Entry =
595 ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
596 SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
597 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
598 }
599 }
600
601 // NEON vector operations that can extend their inputs.
602 if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
603 I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
604 static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
605 // vaddl
606 { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
607 { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 },
608 // vsubl
609 { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
610 { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 },
611 // vmull
612 { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
613 { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 },
614 // vshll
615 { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
616 { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 },
617 };
618
619 auto *User = cast<Instruction>(*I->user_begin());
620 int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
621 if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
622 DstTy.getSimpleVT(),
623 SrcTy.getSimpleVT())) {
624 return AdjustCost(Entry->Cost);
625 }
626 }
627
628 // Single to/from double precision conversions.
629 if (Src->isVectorTy() && ST->hasNEON() &&
630 ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
631 DstTy.getScalarType() == MVT::f32) ||
632 (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
633 DstTy.getScalarType() == MVT::f64))) {
634 static const CostTblEntry NEONFltDblTbl[] = {
635 // Vector fptrunc/fpext conversions.
636 {ISD::FP_ROUND, MVT::v2f64, 2},
637 {ISD::FP_EXTEND, MVT::v2f32, 2},
638 {ISD::FP_EXTEND, MVT::v4f32, 4}};
639
640 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
641 if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
642 return AdjustCost(LT.first * Entry->Cost);
643 }
644
645 // Some arithmetic, load and store operations have specific instructions
646 // to cast up/down their types automatically at no extra cost.
647 // TODO: Get these tables to know at least what the related operations are.
648 static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
649 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
650 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
651 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
652 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
653 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
654 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
655
656 // The number of vmovl instructions for the extension.
657 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
658 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
659 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
660 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
661 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
662 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
663 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
664 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
665 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
666 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
667 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
668 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
669 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
670 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
671 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
672 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
673 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
674 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
675
676 // Operations that we legalize using splitting.
677 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
678 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
679
680 // Vector float <-> i32 conversions.
681 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
682 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
683
684 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
685 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
686 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
687 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
688 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
689 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
690 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
691 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
692 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
693 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
694 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
695 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
696 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
697 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
698 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
699 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
700 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
701 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
702 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
703 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
704
705 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
706 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
707 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
708 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
709 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
710 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
711
712 // Vector double <-> i32 conversions.
713 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
714 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
715
716 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
717 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
718 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
719 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
720 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
721 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
722
723 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
724 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
725 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
726 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
727 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
728 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
729 };
730
731 if (SrcTy.isVector() && ST->hasNEON()) {
732 if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
733 DstTy.getSimpleVT(),
734 SrcTy.getSimpleVT()))
735 return AdjustCost(Entry->Cost);
736 }
737
738 // Scalar float to integer conversions.
739 static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
740 { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
741 { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
742 { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
743 { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
744 { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
745 { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
746 { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
747 { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
748 { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
749 { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
750 { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
751 { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
752 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
753 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
754 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
755 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
756 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
757 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
758 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
759 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
760 };
761 if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
762 if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
763 DstTy.getSimpleVT(),
764 SrcTy.getSimpleVT()))
765 return AdjustCost(Entry->Cost);
766 }
767
768 // Scalar integer to float conversions.
769 static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
770 { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
771 { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
772 { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
773 { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
774 { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
775 { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
776 { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
777 { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
778 { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
779 { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
780 { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
781 { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
782 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
783 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
784 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
785 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
786 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
787 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
788 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
789 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
790 };
791
792 if (SrcTy.isInteger() && ST->hasNEON()) {
793 if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
794 ISD, DstTy.getSimpleVT(),
795 SrcTy.getSimpleVT()))
796 return AdjustCost(Entry->Cost);
797 }
798
799 // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
800 // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
801 // are linearised so take more.
802 static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
803 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
804 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
805 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
806 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
807 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
808 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
809 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
810 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
811 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
812 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
813 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
814 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
815 };
816
817 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
818 if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
819 ISD, DstTy.getSimpleVT(),
820 SrcTy.getSimpleVT()))
821 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
822 }
823
824 if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
825 // As a general rule, fp converts that were not matched above are scalarized
826 // and cost 1 vcvt for each lane, so long as the instruction is available.
827 // If not, it will become a series of function calls.
828 const InstructionCost CallCost =
829 getCallInstrCost(nullptr, Dst, {Src}, CostKind);
830 int Lanes = 1;
831 if (SrcTy.isFixedLengthVector())
832 Lanes = SrcTy.getVectorNumElements();
833
834 if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
835 return Lanes;
836 else
837 return Lanes * CallCost;
838 }
839
840 if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
841 SrcTy.isFixedLengthVector()) {
842 // Treat a truncate with larger than legal source (128 bits for MVE) as
843 // expensive, 2 instructions per lane.
844 if ((SrcTy.getScalarType() == MVT::i8 ||
845 SrcTy.getScalarType() == MVT::i16 ||
846 SrcTy.getScalarType() == MVT::i32) &&
847 SrcTy.getSizeInBits() > 128 &&
848 SrcTy.getSizeInBits() > DstTy.getSizeInBits())
849 return SrcTy.getVectorNumElements() * 2;
850 }
851
852 // Scalar integer conversion costs.
853 static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
854 // i16 -> i64 requires two dependent operations.
855 { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
856
857 // Truncates on i64 are assumed to be free.
858 { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
859 { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
860 { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
861 { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
862 };
863
864 if (SrcTy.isInteger()) {
865 if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
866 DstTy.getSimpleVT(),
867 SrcTy.getSimpleVT()))
868 return AdjustCost(Entry->Cost);
869 }
870
871 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
872 ? ST->getMVEVectorCostFactor(CostKind)
873 : 1;
874 return AdjustCost(
875 BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
876}
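// As a small worked example of the tables above (MVE, assuming the operand is a
// normal load feeding the extend): a sext of <8 x i8> to <8 x i16> hits the
// MVELoadConversionTbl entry with cost 0, i.e. the extension folds into a
// vldrb.s16, whereas a plain <2 x i32> -> <2 x i64> sext costs
// 8 * getMVEVectorCostFactor(), reflecting the linearised expansion.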
877
878InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
879 TTI::TargetCostKind CostKind,
880 unsigned Index, Value *Op0,
881 Value *Op1) {
882 // Penalize inserting into a D-subregister. We end up with a three times
883 // lower estimated throughput on Swift.
884 if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
885 ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
886 return 3;
887
888 if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
889 Opcode == Instruction::ExtractElement)) {
890 // Cross-class copies are expensive on many microarchitectures,
891 // so assume they are expensive by default.
892 if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
893 return 3;
894
895 // Even if it's not a cross class copy, this likely leads to mixing
896 // of NEON and VFP code and should therefore be penalized.
897 if (ValTy->isVectorTy() &&
898 ValTy->getScalarSizeInBits() <= 32)
899 return std::max<InstructionCost>(
900 BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1),
901 2U);
902 }
903
904 if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
905 Opcode == Instruction::ExtractElement)) {
906 // Integer cross-lane moves are more expensive than float, which can
907 // sometimes just be vmovs. Integers involve being passed to GPR registers,
908 // causing more of a delay.
909 std::pair<InstructionCost, MVT> LT =
910 getTypeLegalizationCost(ValTy);
911 return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
912 }
913
914 return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
915}
916
917InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
918 Type *CondTy,
919 CmpInst::Predicate VecPred,
920 TTI::TargetCostKind CostKind,
921 const Instruction *I) {
922 int ISD = TLI->InstructionOpcodeToISD(Opcode);
923
924 // Thumb scalar code size cost for select.
925 if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
926 ST->isThumb() && !ValTy->isVectorTy()) {
927 // Assume expensive structs.
928 if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
929 return TTI::TCC_Expensive;
930
931 // Select costs can vary because they:
932 // - may require one or more conditional mov (including an IT),
933 // - can't operate directly on immediates,
934 // - require live flags, which we can't copy around easily.
935 InstructionCost Cost = TTI::TCC_Basic;
936
937 // Possible IT instruction for Thumb2, or more for Thumb1.
938 ++Cost;
939
940 // i1 values may need rematerialising by using mov immediates and/or
941 // flag setting instructions.
942 if (ValTy->isIntegerTy(1))
943 ++Cost;
944
945 return Cost;
946 }
947
948 // If this is a vector min/max/abs, use the cost of that intrinsic directly
949 // instead. Hopefully when min/max intrinsics are more prevalent this code
950 // will not be needed.
951 const Instruction *Sel = I;
952 if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
953 Sel->hasOneUse())
954 Sel = cast<Instruction>(Sel->user_back());
955 if (Sel && ValTy->isVectorTy() &&
956 (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
957 const Value *LHS, *RHS;
958 SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
959 unsigned IID = 0;
960 switch (SPF) {
961 case SPF_ABS:
962 IID = Intrinsic::abs;
963 break;
964 case SPF_SMIN:
965 IID = Intrinsic::smin;
966 break;
967 case SPF_SMAX:
968 IID = Intrinsic::smax;
969 break;
970 case SPF_UMIN:
971 IID = Intrinsic::umin;
972 break;
973 case SPF_UMAX:
974 IID = Intrinsic::umax;
975 break;
976 case SPF_FMINNUM:
977 IID = Intrinsic::minnum;
978 break;
979 case SPF_FMAXNUM:
980 IID = Intrinsic::maxnum;
981 break;
982 default:
983 break;
984 }
985 if (IID) {
986 // The ICmp is free, the select gets the cost of the min/max/etc
987 if (Sel != I)
988 return 0;
989 IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
990 return getIntrinsicInstrCost(CostAttrs, CostKind);
991 }
992 }
993
994 // On NEON a vector select gets lowered to vbsl.
995 if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
996 // Lowering of some vector selects is currently far from perfect.
997 static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
998 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
999 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
1000 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
1001 };
1002
1003 EVT SelCondTy = TLI->getValueType(DL, CondTy);
1004 EVT SelValTy = TLI->getValueType(DL, ValTy);
1005 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1006 if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
1007 SelCondTy.getSimpleVT(),
1008 SelValTy.getSimpleVT()))
1009 return Entry->Cost;
1010 }
1011
1012 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1013 return LT.first;
1014 }
1015
1016 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
1017 (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
1018 cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
1019 FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
1020 FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
1021 if (!VecCondTy)
1022 VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));
1023
1024 // If we don't have mve.fp, any fp operations will need to be scalarized.
1025 if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
1026 // One scalarization insert, one scalarization extract and the cost of the
1027 // fcmps.
1028 return BaseT::getScalarizationOverhead(VecValTy, /*Insert*/ false,
1029 /*Extract*/ true, CostKind) +
1030 BaseT::getScalarizationOverhead(VecCondTy, /*Insert*/ true,
1031 /*Extract*/ false, CostKind) +
1032 VecValTy->getNumElements() *
1033 getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
1034 VecCondTy->getScalarType(), VecPred,
1035 CostKind, I);
1036 }
1037
1038 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1039 int BaseCost = ST->getMVEVectorCostFactor(CostKind);
1040 // There are two types - the input that specifies the type of the compare
1041 // and the output vXi1 type. Because we don't know how the output will be
1042 // split, we may need an expensive shuffle to get two in sync. This has the
1043 // effect of making larger than legal compares (v8i32 for example)
1044 // expensive.
1045 if (LT.second.isVector() && LT.second.getVectorNumElements() > 2) {
1046 if (LT.first > 1)
1047 return LT.first * BaseCost +
1048 BaseT::getScalarizationOverhead(VecCondTy, /*Insert*/ true,
1049 /*Extract*/ false, CostKind);
1050 return BaseCost;
1051 }
1052 }
1053
1054 // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1055 // for "multiple beats" potentially needed by MVE instructions.
1056 int BaseCost = 1;
1057 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
1058 BaseCost = ST->getMVEVectorCostFactor(CostKind);
1059
1060 return BaseCost *
1061 BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1062}
1063
1064InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
1065 ScalarEvolution *SE,
1066 const SCEV *Ptr) {
1067 // Address computations in vectorized code with non-consecutive addresses will
1068 // likely result in more instructions compared to scalar code where the
1069 // computation can more often be merged into the index mode. The resulting
1070 // extra micro-ops can significantly decrease throughput.
1071 unsigned NumVectorInstToHideOverhead = 10;
1072 int MaxMergeDistance = 64;
1073
1074 if (ST->hasNEON()) {
1075 if (Ty->isVectorTy() && SE &&
1076 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1077 return NumVectorInstToHideOverhead;
1078
1079 // In many cases the address computation is not merged into the instruction
1080 // addressing mode.
1081 return 1;
1082 }
1083 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1084}
1085
1086bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
1087 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1088 // If a VCTP is part of a chain, it's already profitable and shouldn't be
1089 // optimized, else LSR may block tail-predication.
1090 switch (II->getIntrinsicID()) {
1091 case Intrinsic::arm_mve_vctp8:
1092 case Intrinsic::arm_mve_vctp16:
1093 case Intrinsic::arm_mve_vctp32:
1094 case Intrinsic::arm_mve_vctp64:
1095 return true;
1096 default:
1097 break;
1098 }
1099 }
1100 return false;
1101}
1102
1103bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
1104 if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
1105 return false;
1106
1107 if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
1108 // Don't support v2i1 yet.
1109 if (VecTy->getNumElements() == 2)
1110 return false;
1111
1112 // We don't support extending fp types.
1113 unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
1114 if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
1115 return false;
1116 }
1117
1118 unsigned EltWidth = DataTy->getScalarSizeInBits();
1119 return (EltWidth == 32 && Alignment >= 4) ||
1120 (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
1121}
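// In practice this accepts e.g. <4 x i32> with align 4, <8 x i16> with align 2
// and <16 x i8> with any alignment, matching what predicated VLDRW/VLDRH/VLDRB
// can handle, and rejects 2-element vectors (v2i1 predicates are not supported
// here yet).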
1122
1123bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
1124 if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
1125 return false;
1126
1127 unsigned EltWidth = Ty->getScalarSizeInBits();
1128 return ((EltWidth == 32 && Alignment >= 4) ||
1129 (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
1130}
1131
1132/// Given a memcpy/memset/memmove instruction, return the number of memory
1133/// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
1134/// call is used.
1135int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
1136 MemOp MOp;
1137 unsigned DstAddrSpace = ~0u;
1138 unsigned SrcAddrSpace = ~0u;
1139 const Function *F = I->getParent()->getParent();
1140
1141 if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
1142 ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
1143 // If 'size' is not a constant, a library call will be generated.
1144 if (!C)
1145 return -1;
1146
1147 const unsigned Size = C->getValue().getZExtValue();
1148 const Align DstAlign = *MC->getDestAlign();
1149 const Align SrcAlign = *MC->getSourceAlign();
1150
1151 MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
1152 /*IsVolatile*/ false);
1153 DstAddrSpace = MC->getDestAddressSpace();
1154 SrcAddrSpace = MC->getSourceAddressSpace();
1155 }
1156 else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
1157 ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
1158 // If 'size' is not a constant, a library call will be generated.
1159 if (!C)
1160 return -1;
1161
1162 const unsigned Size = C->getValue().getZExtValue();
1163 const Align DstAlign = *MS->getDestAlign();
1164
1165 MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1166 /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1167 DstAddrSpace = MS->getDestAddressSpace();
1168 }
1169 else
1170 llvm_unreachable("Expected a memcpy/move or memset!");
1171
1172 unsigned Limit, Factor = 2;
1173 switch(I->getIntrinsicID()) {
1174 case Intrinsic::memcpy:
1175 Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1176 break;
1177 case Intrinsic::memmove:
1178 Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1179 break;
1180 case Intrinsic::memset:
1181 Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1182 Factor = 1;
1183 break;
1184 default:
1185 llvm_unreachable("Expected a memcpy/move or memset!");
1186 }
1187
1188 // MemOps will be populated with a list of data types that need to be
1189 // loaded and stored. That's why we multiply the number of elements by 2 to
1190 // get the cost for this memcpy.
1191 std::vector<EVT> MemOps;
1192 if (getTLI()->findOptimalMemOpLowering(
1193 MemOps, Limit, MOp, DstAddrSpace,
1194 SrcAddrSpace, F->getAttributes()))
1195 return MemOps.size() * Factor;
1196
1197 // If we can't find an optimal memop lowering, return the default cost
1198 return -1;
1199}
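// For example (illustrative): a memcpy of 16 bytes with 4-byte aligned operands
// is typically lowered as four i32 loads plus four i32 stores, so
// findOptimalMemOpLowering returns four types and, with Factor == 2, this
// function reports 8 memory operations; memset only stores, hence Factor == 1.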
1200
1201InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1202 int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1203
1204 // To model the cost of a library call, we assume 1 for the call, and
1205 // 3 for the argument setup.
1206 if (NumOps == -1)
1207 return 4;
1208 return NumOps;
1209}
1210
1211InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1212 VectorType *Tp, ArrayRef<int> Mask,
1213 TTI::TargetCostKind CostKind,
1214 int Index, VectorType *SubTp,
1215 ArrayRef<const Value *> Args) {
1216 Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
1217 // Treat extractsubvector as single op permutation.
1218 bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
1219 if (IsExtractSubvector)
1220 Kind = TTI::SK_PermuteSingleSrc;
1221 if (ST->hasNEON()) {
1222 if (Kind == TTI::SK_Broadcast) {
1223 static const CostTblEntry NEONDupTbl[] = {
1224 // VDUP handles these cases.
1225 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1226 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1227 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1228 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1229 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1230 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1231
1232 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1233 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1234 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1235 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1236
1237 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1238 if (const auto *Entry =
1239 CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1240 return LT.first * Entry->Cost;
1241 }
1242 if (Kind == TTI::SK_Reverse) {
1243 static const CostTblEntry NEONShuffleTbl[] = {
1244 // Reverse shuffle cost one instruction if we are shuffling within a
1245 // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1246 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1247 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1248 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1249 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1250 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1251 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1252
1253 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1254 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1255 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1256 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1257
1258 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1259 if (const auto *Entry =
1260 CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1261 return LT.first * Entry->Cost;
1262 }
1263 if (Kind == TTI::SK_Select) {
1264 static const CostTblEntry NEONSelShuffleTbl[] = {
1265 // Select shuffle cost table for ARM. Cost is the number of
1266 // instructions
1267 // required to create the shuffled vector.
1268
1269 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1270 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1271 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1272 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1273
1274 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1275 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1276 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1277
1278 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1279
1280 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1281
1282 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1283 if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1284 ISD::VECTOR_SHUFFLE, LT.second))
1285 return LT.first * Entry->Cost;
1286 }
1287 }
1288 if (ST->hasMVEIntegerOps()) {
1289 if (Kind == TTI::SK_Broadcast) {
1290 static const CostTblEntry MVEDupTbl[] = {
1291 // VDUP handles these cases.
1292 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1293 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1294 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1295 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1296 {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1297
1298 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1299 if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1300 LT.second))
1301 return LT.first * Entry->Cost *
1302 ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1303 }
1304
1305 if (!Mask.empty()) {
1306 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1307 if (LT.second.isVector() &&
1308 Mask.size() <= LT.second.getVectorNumElements() &&
1309 (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
1310 isVREVMask(Mask, LT.second, 64)))
1311 return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
1312 }
1313 }
1314
1315 // Restore optimal kind.
1316 if (IsExtractSubvector)
1317 Kind = TTI::SK_ExtractSubvector;
1318 int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1319 ? ST->getMVEVectorCostFactor(CostKind)
1320 : 1;
1321 return BaseCost *
1322 BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
1323}
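// Example of the VREV special case above: on MVE, a <4 x i32> shuffle with mask
// <1, 0, 3, 2> swaps 32-bit lanes within each 64-bit chunk and is matched by
// isVREVMask(..., 64), so it is costed as a single (beat-adjusted) VREV64.32
// rather than as a generic permute.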
1324
1325InstructionCost ARMTTIImpl::getArithmeticInstrCost(
1326 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1327 TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
1328 ArrayRef<const Value *> Args,
1329 const Instruction *CxtI) {
1330 int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1331 if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1332 // Make operations on i1 relatively expensive as this often involves
1333 // combining predicates. AND and XOR should be easier to handle with IT
1334 // blocks.
1335 switch (ISDOpcode) {
1336 default:
1337 break;
1338 case ISD::AND:
1339 case ISD::XOR:
1340 return 2;
1341 case ISD::OR:
1342 return 3;
1343 }
1344 }
1345
1346 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1347
1348 if (ST->hasNEON()) {
1349 const unsigned FunctionCallDivCost = 20;
1350 const unsigned ReciprocalDivCost = 10;
1351 static const CostTblEntry CostTbl[] = {
1352 // Division.
1353 // These costs are somewhat random. Choose a cost of 20 to indicate that
1354 // vectorizing division (added function call) is going to be very expensive.
1355 // Double registers types.
1356 { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1357 { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1358 { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1359 { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1360 { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1361 { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1362 { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1363 { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1364 { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
1365 { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
1366 { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1367 { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1368 { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
1369 { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
1370 { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
1371 { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
1372 // Quad register types.
1373 { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1374 { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1375 { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1376 { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1377 { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1378 { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1379 { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1380 { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1381 { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1382 { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1383 { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1384 { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1385 { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1386 { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1387 { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1388 { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1389 // Multiplication.
1390 };
1391
1392 if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1393 return LT.first * Entry->Cost;
1394
1395 InstructionCost Cost = BaseT::getArithmeticInstrCost(
1396 Opcode, Ty, CostKind, Op1Info, Op2Info);
1397
1398 // This is somewhat of a hack. The problem that we are facing is that SROA
1399 // creates a sequence of shift, and, or instructions to construct values.
1400 // These sequences are recognized by the ISel and have zero-cost. Not so for
1401 // the vectorized code. Because we have support for v2i64 but not i64 those
1402 // sequences look particularly beneficial to vectorize.
1403 // To work around this we increase the cost of v2i64 operations to make them
1404 // seem less beneficial.
1405 if (LT.second == MVT::v2i64 && Op2Info.isUniform() && Op2Info.isConstant())
1406 Cost += 4;
1407
1408 return Cost;
1409 }
1410
1411 // If this operation is a shift on arm/thumb2, it might well be folded into
1412 // the following instruction, hence having a cost of 0.
1413 auto LooksLikeAFreeShift = [&]() {
1414 if (ST->isThumb1Only() || Ty->isVectorTy())
1415 return false;
1416
1417 if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1418 return false;
1419 if (!Op2Info.isUniform() || !Op2Info.isConstant())
1420 return false;
1421
1422 // Folded into a ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1423 switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1424 case Instruction::Add:
1425 case Instruction::Sub:
1426 case Instruction::And:
1427 case Instruction::Xor:
1428 case Instruction::Or:
1429 case Instruction::ICmp:
1430 return true;
1431 default:
1432 return false;
1433 }
1434 };
1435 if (LooksLikeAFreeShift())
1436 return 0;
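// e.g. the shift in
//   %s = shl i32 %x, 2
//   %a = add i32 %y, %s
// is expected to fold into the ADD's shifted operand (add r0, r1, r2, lsl #2),
// so it is reported as free here.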
1437
1438 // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1439 // for "multiple beats" potentially needed by MVE instructions.
1440 int BaseCost = 1;
1441 if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1442 BaseCost = ST->getMVEVectorCostFactor(CostKind);
1443
1444 // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
1445 // without treating floats as more expensive than scalars or increasing the
1446 // costs for custom operations. The result is also multiplied by the
1447 // MVEVectorCostFactor where appropriate.
1448 if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1449 return LT.first * BaseCost;
1450
1451 // Else this is expand, assume that we need to scalarize this op.
1452 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1453 unsigned Num = VTy->getNumElements();
1454 InstructionCost Cost =
1455 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
1456 // Return the cost of multiple scalar invocations plus the cost of
1457 // inserting and extracting the values.
1458 SmallVector<Type *> Tys(Args.size(), Ty);
1459 return BaseT::getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1460 Num * Cost;
1461 }
1462
1463 return BaseCost;
1464}
1465
1466InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1467 MaybeAlign Alignment,
1468 unsigned AddressSpace,
1469 TTI::TargetCostKind CostKind,
1470 TTI::OperandValueInfo OpInfo,
1471 const Instruction *I) {
1472 // TODO: Handle other cost kinds.
1473 if (CostKind != TTI::TCK_RecipThroughput)
1474 return 1;
1475
1476 // Type legalization can't handle structs
1477 if (TLI->getValueType(DL, Src, true) == MVT::Other)
1478 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1479 CostKind);
1480
1481 if (ST->hasNEON() && Src->isVectorTy() &&
1482 (Alignment && *Alignment != Align(16)) &&
1483 cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1484 // Unaligned loads/stores are extremely inefficient.
1485 // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
1486 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1487 return LT.first * 4;
1488 }
1489
1490 // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1491 // Same for stores.
1492 if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1493 ((Opcode == Instruction::Load && I->hasOneUse() &&
1494 isa<FPExtInst>(*I->user_begin())) ||
1495 (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1496 FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1497 Type *DstTy =
1498 Opcode == Instruction::Load
1499 ? (*I->user_begin())->getType()
1500 : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1501 if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1502 DstTy->getScalarType()->isFloatTy())
1503 return ST->getMVEVectorCostFactor(CostKind);
1504 }
1505
1506 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1507 ? ST->getMVEVectorCostFactor(CostKind)
1508 : 1;
1509 return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1510 CostKind, OpInfo, I);
1511}
1512
1513InstructionCost
1514ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1515 unsigned AddressSpace,
1516 TTI::TargetCostKind CostKind) {
1517 if (ST->hasMVEIntegerOps()) {
1518 if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1519 return ST->getMVEVectorCostFactor(CostKind);
1520 if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1521 return ST->getMVEVectorCostFactor(CostKind);
1522 }
1523 if (!isa<FixedVectorType>(Src))
1524 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1525 CostKind);
1526 // Scalar cost, which is currently very high due to the inefficiency of the
1527 // generated code.
1528 return cast<FixedVectorType>(Src)->getNumElements() * 8;
1529}
1530
1531InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
1532 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1533 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1534 bool UseMaskForCond, bool UseMaskForGaps) {
1535 assert(Factor >= 2 && "Invalid interleave factor");
1536 assert(isa<VectorType>(VecTy) && "Expect a vector type");
1537
1538 // vldN/vstN doesn't support vector types of i64/f64 element.
1539 bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1540
1541 if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1542 !UseMaskForCond && !UseMaskForGaps) {
1543 unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1544 auto *SubVecTy =
1545 FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1546
1547 // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1548 // Accesses having vector types that are a multiple of 128 bits can be
1549 // matched to more than one vldN/vstN instruction.
1550 int BaseCost =
1551 ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1552 if (NumElts % Factor == 0 &&
1553 TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1554 return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1555
1556 // Some smaller than legal interleaved patterns are cheap as we can make
1557 // use of the vmovn or vrev patterns to interleave a standard load. This is
1558 // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1559 // promoted differently). The cost of 2 here is then a load and vrev or
1560 // vmovn.
1561 if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1562 VecTy->isIntOrIntVectorTy() &&
1563 DL.getTypeSizeInBits(SubVecTy).getFixedValue() <= 64)
1564 return 2 * BaseCost;
1565 }
1566
1567 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1568 Alignment, AddressSpace, CostKind,
1569 UseMaskForCond, UseMaskForGaps);
1570}
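The legal vldN/vstN path above is the product of three quantities: the interleave factor, the MVE/NEON base cost, and the number of legal vector accesses needed per lane. A small sketch of that product with assumed inputs:

#include <cassert>

// Sketch of the legal vldN/vstN cost product. NumInterleavedAccesses stands
// in for TLI->getNumInterleavedAccesses(SubVecTy, DL), and BaseCost for the
// MVE cost factor (1 on NEON-only cores).
unsigned interleavedCostSketch(unsigned Factor, unsigned BaseCost,
                               unsigned NumInterleavedAccesses) {
  return Factor * BaseCost * NumInterleavedAccesses;
}

int main() {
  // e.g. a factor-2 interleaved access whose per-lane type legalizes to a
  // single 128-bit vector, on an MVE core with an assumed cost factor of 2.
  assert(interleavedCostSketch(2, 2, 1) == 4);
  return 0;
}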
1571
1572InstructionCost ARMTTIImpl::getGatherScatterOpCost(
1573 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1574 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1575 using namespace PatternMatch;
1576 if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1577 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1578 Alignment, CostKind, I);
1579
1580 assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1581 auto *VTy = cast<FixedVectorType>(DataTy);
1582
1583 // TODO: Splitting, once we do that.
1584
1585 unsigned NumElems = VTy->getNumElements();
1586 unsigned EltSize = VTy->getScalarSizeInBits();
1587 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(DataTy);
1588
1589 // For now, it is assumed that for the MVE gather instructions the loads are
1590 // all effectively serialised. This means the cost is the scalar cost
1591 // multiplied by the number of elements being loaded. This is possibly very
1592 // conservative, but even so we still end up vectorising loops because the
1593 // cost per iteration for many loops is lower than for scalar loops.
1594 InstructionCost VectorCost =
1595 NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1596 // The scalarization cost should be a lot higher. We use the number of vector
1597 // elements plus the scalarization overhead. If masking is required then a lot
1598 // of little blocks will be needed and potentially a scalarized p0 mask,
1599 // greatly increasing the cost.
1600 InstructionCost ScalarCost =
1601 NumElems * LT.first + (VariableMask ? NumElems * 5 : 0) +
1602 BaseT::getScalarizationOverhead(VTy, /*Insert*/ true, /*Extract*/ false,
1603 CostKind) +
1604 BaseT::getScalarizationOverhead(VTy, /*Insert*/ false, /*Extract*/ true,
1605 CostKind);
1606
1607 if (EltSize < 8 || Alignment < EltSize / 8)
1608 return ScalarCost;
1609
1610 unsigned ExtSize = EltSize;
1611 // Check whether there's a single user that asks for an extended type
1612 if (I != nullptr) {
1613 // Depending on the caller of this function, a gather instruction will
1614 // either have opcode Instruction::Load or be a call to the masked_gather
1615 // intrinsic
1616 if ((I->getOpcode() == Instruction::Load ||
1617 match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1618 I->hasOneUse()) {
1619 const User *Us = *I->users().begin();
1620 if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1621 // only allow valid type combinations
1622 unsigned TypeSize =
1623 cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1624 if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1625 (TypeSize == 16 && EltSize == 8)) &&
1626 TypeSize * NumElems == 128) {
1627 ExtSize = TypeSize;
1628 }
1629 }
1630 }
1631 // Check whether the input data needs to be truncated
1632 TruncInst *T;
1633 if ((I->getOpcode() == Instruction::Store ||
1634 match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1635 (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1636 // Only allow valid type combinations
1637 unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1638 if (((EltSize == 16 && TypeSize == 32) ||
1639 (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1640 TypeSize * NumElems == 128)
1641 ExtSize = TypeSize;
1642 }
1643 }
1644
1645 if (ExtSize * NumElems != 128 || NumElems < 4)
1646 return ScalarCost;
1647
1648 // Any (aligned) i32 gather will not need to be scalarised.
1649 if (ExtSize == 32)
1650 return VectorCost;
1651 // For smaller types, we need to ensure that the gep's inputs are correctly
1652 // extended from a small enough value. Other sizes (including i64) are
1653 // scalarized for now.
1654 if (ExtSize != 8 && ExtSize != 16)
1655 return ScalarCost;
1656
1657 if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1658 Ptr = BC->getOperand(0);
1659 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1660 if (GEP->getNumOperands() != 2)
1661 return ScalarCost;
1662 unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1663 // Scale needs to be correct (which is only relevant for i16s).
1664 if (Scale != 1 && Scale * 8 != ExtSize)
1665 return ScalarCost;
1666 // And we need to zext (not sext) the indexes from a small enough type.
1667 if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1668 if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1669 return VectorCost;
1670 }
1671 return ScalarCost;
1672 }
1673 return ScalarCost;
1674}
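To make the trade-off above concrete, here is a standalone sketch of the two competing estimates for a gather whose type is already legal (LT.first == 1). The per-element insert/extract constant stands in for the scalarization overhead that the real code queries from the base implementation:

#include <cassert>

// Vector estimate: the MVE gather is modelled as fully serialised, so it is
// one MVE-factor unit per element.
unsigned gatherVectorCostSketch(unsigned NumElems, unsigned MVEFactor) {
  return NumElems * MVEFactor;
}

// Scalar estimate: one unit per element, plus 5 per element if a variable
// mask forces little branch blocks, plus an assumed insert/extract overhead.
unsigned gatherScalarCostSketch(unsigned NumElems, bool VariableMask,
                                unsigned AssumedInsertExtractCost) {
  return NumElems + (VariableMask ? NumElems * 5 : 0) +
         NumElems * AssumedInsertExtractCost;
}

int main() {
  // An aligned <4 x i32> gather keeps the vector estimate (4 * 2 = 8) ...
  assert(gatherVectorCostSketch(4, 2) == 8);
  // ... which is far below the scalarised form with a variable mask
  // (4 + 20 + 8 = 32 under these assumed numbers).
  assert(gatherScalarCostSketch(4, true, 2) == 32);
  return 0;
}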
1675
1676InstructionCost
1677ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1678 std::optional<FastMathFlags> FMF,
1679 TTI::TargetCostKind CostKind) {
1680
1681 EVT ValVT = TLI->getValueType(DL, ValTy);
1682 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1683 unsigned EltSize = ValVT.getScalarSizeInBits();
1684
1685 // In general floating point reductions are a series of elementwise
1686 // operations, with free extracts on each step. These are either in-order or
1687 // treewise depending on whether that is allowed by the fast math flags.
1688 if ((ISD == ISD::FADD || ISD == ISD::FMUL) &&
1689 ((EltSize == 32 && ST->hasVFP2Base()) ||
1690 (EltSize == 64 && ST->hasFP64()) ||
1691 (EltSize == 16 && ST->hasFullFP16()))) {
1692 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1693 unsigned VecLimit = ST->hasMVEFloatOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1694 InstructionCost VecCost = 0;
1695 while (!TTI::requiresOrderedReduction(FMF) && isPowerOf2_32(NumElts) &&
1696 NumElts * EltSize > VecLimit) {
1697 Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts / 2);
1698 VecCost += getArithmeticInstrCost(Opcode, VecTy, CostKind);
1699 NumElts /= 2;
1700 }
1701
1702 // For fp16 we need to extract the upper lane elements. MVE can add a
1703 // VREV+FMIN/MAX to perform another vector step instead.
1704 InstructionCost ExtractCost = 0;
1705 if (!TTI::requiresOrderedReduction(FMF) && ST->hasMVEFloatOps() &&
1706 ValVT.getVectorElementType() == MVT::f16 && NumElts == 8) {
1707 VecCost += ST->getMVEVectorCostFactor(CostKind) * 2;
1708 NumElts /= 2;
1709 } else if (ValVT.getVectorElementType() == MVT::f16)
1710 ExtractCost = NumElts / 2;
1711
1712 return VecCost + ExtractCost +
1713 NumElts *
1714 getArithmeticInstrCost(Opcode, ValTy->getElementType(), CostKind);
1715 }
1716
1717 if ((ISD == ISD::AND || ISD == ISD::OR || ISD == ISD::XOR) &&
1718 (EltSize == 64 || EltSize == 32 || EltSize == 16 || EltSize == 8)) {
1719 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1720 unsigned VecLimit =
1721 ST->hasMVEIntegerOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1722 InstructionCost VecCost = 0;
1723 while (isPowerOf2_32(NumElts) && NumElts * EltSize > VecLimit) {
1724 Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts / 2);
1725 VecCost += getArithmeticInstrCost(Opcode, VecTy, CostKind);
1726 NumElts /= 2;
1727 }
1728 // For i16/i8, MVE will perform a VREV + VORR/VAND/VEOR for the 64bit vector
1729 // step.
1730 if (ST->hasMVEIntegerOps() && ValVT.getScalarSizeInBits() <= 16 &&
1731 NumElts * EltSize == 64) {
1732 Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts);
1733 VecCost += ST->getMVEVectorCostFactor(CostKind) +
1734 getArithmeticInstrCost(Opcode, VecTy, CostKind);
1735 NumElts /= 2;
1736 }
1737
1738 // From here we extract the elements and perform the and/or/xor.
1739 InstructionCost ExtractCost = NumElts;
1740 return VecCost + ExtractCost +
1741 (NumElts - 1) * getArithmeticInstrCost(
1742 Opcode, ValTy->getElementType(), CostKind);
1743 }
1744
1745 if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD ||
1746 TTI::requiresOrderedReduction(FMF))
1747 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1748
1749 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1750
1751 static const CostTblEntry CostTblAdd[]{
1752 {ISD::ADD, MVT::v16i8, 1},
1753 {ISD::ADD, MVT::v8i16, 1},
1754 {ISD::ADD, MVT::v4i32, 1},
1755 };
1756 if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1757 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1758
1759 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1760}
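The floating-point branch above is essentially a tree-reduction recurrence: halve the vector until it fits in a register, paying one vector operation per halving step, then finish element-wise. A standalone sketch of that recurrence with assumed per-operation costs:

#include <cassert>

// Sketch of the tree-reduction costing loop: halve a power-of-two vector
// until it fits in VecLimit bits, then pay one scalar op per remaining
// element. VecOpCost and ScalarOpCost are assumed values.
unsigned treeReductionCostSketch(unsigned NumElts, unsigned EltSize,
                                 unsigned VecLimit, unsigned VecOpCost,
                                 unsigned ScalarOpCost) {
  unsigned VecCost = 0;
  while ((NumElts & (NumElts - 1)) == 0 && NumElts * EltSize > VecLimit) {
    VecCost += VecOpCost;
    NumElts /= 2;
  }
  return VecCost + NumElts * ScalarOpCost;
}

int main() {
  // An fadd reduction of <8 x float> on MVE (128-bit limit): one halving
  // step (v8f32 -> v4f32), then 4 scalar adds: 1*2 + 4*1 = 6.
  assert(treeReductionCostSketch(8, 32, 128, 2, 1) == 6);
  return 0;
}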
1761
1762InstructionCost ARMTTIImpl::getExtendedReductionCost(
1763 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
1764 FastMathFlags FMF, TTI::TargetCostKind CostKind) {
1765 EVT ValVT = TLI->getValueType(DL, ValTy);
1766 EVT ResVT = TLI->getValueType(DL, ResTy);
1767
1768 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1769
1770 switch (ISD) {
1771 case ISD::ADD:
1772 if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1773 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1774
1775 // The legal cases are:
1776 // VADDV u/s 8/16/32
1777 // VADDLV u/s 32
1778 // Codegen currently cannot always handle larger than legal vectors very
1779 // well, especially for predicated reductions where the mask needs to be
1780 // split, so restrict to 128bit or smaller input types.
1781 unsigned RevVTSize = ResVT.getSizeInBits();
1782 if (ValVT.getSizeInBits() <= 128 &&
1783 ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
1784 (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
1785 (LT.second == MVT::v4i32 && RevVTSize <= 64)))
1786 return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1787 }
1788 break;
1789 default:
1790 break;
1791 }
1792 return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy, FMF,
1793 CostKind);
1794}
1795
1796InstructionCost
1797ARMTTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
1798 VectorType *ValTy,
1799 TTI::TargetCostKind CostKind) {
1800 EVT ValVT = TLI->getValueType(DL, ValTy);
1801 EVT ResVT = TLI->getValueType(DL, ResTy);
1802
1803 if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1804 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1805
1806 // The legal cases are:
1807 // VMLAV u/s 8/16/32
1808 // VMLALV u/s 16/32
1809 // Codegen currently cannot always handle larger than legal vectors very
1810 // well, especially for predicated reductions where the mask needs to be
1811 // split, so restrict to 128bit or smaller input types.
1812 unsigned RevVTSize = ResVT.getSizeInBits();
1813 if (ValVT.getSizeInBits() <= 128 &&
1814 ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
1815 (LT.second == MVT::v8i16 && RevVTSize <= 64) ||
1816 (LT.second == MVT::v4i32 && RevVTSize <= 64)))
1817 return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1818 }
1819
1820 return BaseT::getMulAccReductionCost(IsUnsigned, ResTy, ValTy, CostKind);
1821}
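The condition above encodes exactly which VMLAV/VMLALV forms exist. A small boolean sketch of the same legality test, using plain bit widths instead of MVTs (a simplification, not the real interface):

#include <cassert>

// Sketch of the VMLAV/VMLALV legality rule: the input must legalize to a
// 128-bit MVE vector, and the accumulator width is capped per lane size.
bool mulAccLegalSketch(unsigned InputBits, unsigned LaneBits,
                       unsigned ResultBits) {
  if (InputBits > 128)
    return false;
  if (LaneBits == 8)
    return ResultBits <= 32; // VMLAV only for i8 lanes
  if (LaneBits == 16 || LaneBits == 32)
    return ResultBits <= 64; // VMLAV, or VMLALV for a 64-bit accumulator
  return false;
}

int main() {
  assert(mulAccLegalSketch(128, 16, 64)); // v8i16 -> i64: VMLALV
  assert(!mulAccLegalSketch(128, 8, 64)); // v16i8 -> i64: not provided
  return 0;
}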
1822
1823InstructionCost
1824ARMTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
1825 FastMathFlags FMF,
1826 TTI::TargetCostKind CostKind) {
1827 EVT ValVT = TLI->getValueType(DL, Ty);
1828
1829 // In general floating point reductions are a series of elementwise
1830 // operations, with free extracts on each step. These are either in-order or
1831 // treewise depending on whether that is allowed by the fast math flags.
1832 if ((IID == Intrinsic::minnum || IID == Intrinsic::maxnum) &&
1833 ((ValVT.getVectorElementType() == MVT::f32 && ST->hasVFP2Base()) ||
1834 (ValVT.getVectorElementType() == MVT::f64 && ST->hasFP64()) ||
1835 (ValVT.getVectorElementType() == MVT::f16 && ST->hasFullFP16()))) {
1836 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1837 unsigned EltSize = ValVT.getScalarSizeInBits();
1838 unsigned VecLimit = ST->hasMVEFloatOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1839 InstructionCost VecCost;
1840 while (isPowerOf2_32(NumElts) && NumElts * EltSize > VecLimit) {
1841 Type *VecTy = FixedVectorType::get(Ty->getElementType(), NumElts/2);
1842 IntrinsicCostAttributes ICA(IID, VecTy, {VecTy, VecTy}, FMF);
1843 VecCost += getIntrinsicInstrCost(ICA, CostKind);
1844 NumElts /= 2;
1845 }
1846
1847 // For fp16 we need to extract the upper lane elements. MVE can add a
1848 // VREV+FMIN/MAX to perform another vector step instead.
1849 InstructionCost ExtractCost = 0;
1850 if (ST->hasMVEFloatOps() && ValVT.getVectorElementType() == MVT::f16 &&
1851 NumElts == 8) {
1852 VecCost += ST->getMVEVectorCostFactor(CostKind) * 2;
1853 NumElts /= 2;
1854 } else if (ValVT.getVectorElementType() == MVT::f16)
1855 ExtractCost = cast<FixedVectorType>(Ty)->getNumElements() / 2;
1856
1858 {Ty->getElementType(), Ty->getElementType()},
1859 FMF);
1860 return VecCost + ExtractCost +
1861 (NumElts - 1) * getIntrinsicInstrCost(ICA, CostKind);
1862 }
1863
1864 if (IID == Intrinsic::smin || IID == Intrinsic::smax ||
1865 IID == Intrinsic::umin || IID == Intrinsic::umax) {
1866 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1867
1868 // All costs are the same for u/s min/max. These lower to vminv, which are
1869 // given a slightly higher cost as they tend to take multiple cycles for
1870 // smaller type sizes.
1871 static const CostTblEntry CostTblAdd[]{
1872 {ISD::SMIN, MVT::v16i8, 4},
1873 {ISD::SMIN, MVT::v8i16, 3},
1874 {ISD::SMIN, MVT::v4i32, 2},
1875 };
1876 if (const auto *Entry = CostTableLookup(CostTblAdd, ISD::SMIN, LT.second))
1877 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1878 }
1879
1880 return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1881}
1882
1883InstructionCost
1884ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1885 TTI::TargetCostKind CostKind) {
1886 switch (ICA.getID()) {
1887 case Intrinsic::get_active_lane_mask:
1888 // Currently we make a somewhat optimistic assumption that
1889 // active_lane_mask's are always free. In reality it may be freely folded
1890 // into a tail predicated loop, expanded into a VCPT or expanded into a lot
1891 // of add/icmp code. We may need to improve this in the future, but being
1892 // able to detect if it is free or not involves looking at a lot of other
1893 // code. We currently assume that the vectorizer inserted these, and knew
1894 // what it was doing in adding one.
1895 if (ST->hasMVEIntegerOps())
1896 return 0;
1897 break;
1898 case Intrinsic::sadd_sat:
1899 case Intrinsic::ssub_sat:
1900 case Intrinsic::uadd_sat:
1901 case Intrinsic::usub_sat: {
1902 if (!ST->hasMVEIntegerOps())
1903 break;
1904 Type *VT = ICA.getReturnType();
1905
1906 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1907 if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1908 LT.second == MVT::v16i8) {
1909 // This is a base cost of 1 for the vqadd, plus 3 extract shifts if we
1910 // need to extend the type, as it uses shr(qadd(shl, shl)).
1911 unsigned Instrs =
1912 LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
1913 return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1914 }
1915 break;
1916 }
1917 case Intrinsic::abs:
1918 case Intrinsic::smin:
1919 case Intrinsic::smax:
1920 case Intrinsic::umin:
1921 case Intrinsic::umax: {
1922 if (!ST->hasMVEIntegerOps())
1923 break;
1924 Type *VT = ICA.getReturnType();
1925
1926 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1927 if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1928 LT.second == MVT::v16i8)
1929 return LT.first * ST->getMVEVectorCostFactor(CostKind);
1930 break;
1931 }
1932 case Intrinsic::minnum:
1933 case Intrinsic::maxnum: {
1934 if (!ST->hasMVEFloatOps())
1935 break;
1936 Type *VT = ICA.getReturnType();
1937 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1938 if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
1939 return LT.first * ST->getMVEVectorCostFactor(CostKind);
1940 break;
1941 }
1942 case Intrinsic::fptosi_sat:
1943 case Intrinsic::fptoui_sat: {
1944 if (ICA.getArgTypes().empty())
1945 break;
1946 bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
1947 auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
1948 EVT MTy = TLI->getValueType(DL, ICA.getReturnType());
1949 // Check for the legal types, with the correct subtarget features.
1950 if ((ST->hasVFP2Base() && LT.second == MVT::f32 && MTy == MVT::i32) ||
1951 (ST->hasFP64() && LT.second == MVT::f64 && MTy == MVT::i32) ||
1952 (ST->hasFullFP16() && LT.second == MVT::f16 && MTy == MVT::i32))
1953 return LT.first;
1954
1955 // Equally for MVE vector types
1956 if (ST->hasMVEFloatOps() &&
1957 (LT.second == MVT::v4f32 || LT.second == MVT::v8f16) &&
1958 LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits())
1959 return LT.first * ST->getMVEVectorCostFactor(CostKind);
1960
1961 // Otherwise we use a legal convert followed by a min+max
1962 if (((ST->hasVFP2Base() && LT.second == MVT::f32) ||
1963 (ST->hasFP64() && LT.second == MVT::f64) ||
1964 (ST->hasFullFP16() && LT.second == MVT::f16) ||
1965 (ST->hasMVEFloatOps() &&
1966 (LT.second == MVT::v4f32 || LT.second == MVT::v8f16))) &&
1967 LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
1968 Type *LegalTy = Type::getIntNTy(ICA.getReturnType()->getContext(),
1969 LT.second.getScalarSizeInBits());
1970 InstructionCost Cost =
1971 LT.second.isVector() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1972 IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin
1973 : Intrinsic::umin,
1974 LegalTy, {LegalTy, LegalTy});
1975 Cost += getIntrinsicInstrCost(Attrs1, CostKind);
1976 IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax
1977 : Intrinsic::umax,
1978 LegalTy, {LegalTy, LegalTy});
1979 Cost += getIntrinsicInstrCost(Attrs2, CostKind);
1980 return LT.first * Cost;
1981 }
1982 break;
1983 }
1984 }
1985
1986 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1987}
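As a concrete illustration of the saturating-arithmetic case above: the cost is one MVE-factor unit for a natively supported vector, and four units (shl, shl, vqadd, shr) when the value first has to be widened to a legal lane size. Sketched standalone with assumed inputs:

#include <cassert>

// Sketch of the sadd/ssub/uadd/usub.sat costing on MVE. LegalizationCount
// stands in for LT.first and MVEFactor for the subtarget's cost factor.
unsigned satAddCostSketch(bool NeedsPromotion, unsigned LegalizationCount,
                          unsigned MVEFactor) {
  unsigned Instrs = NeedsPromotion ? 4 : 1;
  return LegalizationCount * MVEFactor * Instrs;
}

int main() {
  assert(satAddCostSketch(false, 1, 2) == 2); // e.g. sadd.sat on <8 x i16>
  assert(satAddCostSketch(true, 1, 2) == 8);  // narrow type promoted first
  return 0;
}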
1988
1989bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1990 if (!F->isIntrinsic())
1991 return BaseT::isLoweredToCall(F);
1992
1993 // Assume all Arm-specific intrinsics map to an instruction.
1994 if (F->getName().starts_with("llvm.arm"))
1995 return false;
1996
1997 switch (F->getIntrinsicID()) {
1998 default: break;
1999 case Intrinsic::powi:
2000 case Intrinsic::sin:
2001 case Intrinsic::cos:
2002 case Intrinsic::pow:
2003 case Intrinsic::log:
2004 case Intrinsic::log10:
2005 case Intrinsic::log2:
2006 case Intrinsic::exp:
2007 case Intrinsic::exp2:
2008 return true;
2009 case Intrinsic::sqrt:
2010 case Intrinsic::fabs:
2011 case Intrinsic::copysign:
2012 case Intrinsic::floor:
2013 case Intrinsic::ceil:
2014 case Intrinsic::trunc:
2015 case Intrinsic::rint:
2016 case Intrinsic::nearbyint:
2017 case Intrinsic::round:
2018 case Intrinsic::canonicalize:
2019 case Intrinsic::lround:
2020 case Intrinsic::llround:
2021 case Intrinsic::lrint:
2022 case Intrinsic::llrint:
2023 if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
2024 return true;
2025 if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
2026 return true;
2027 // Some operations can be handled by vector instructions and assume
2028 // unsupported vectors will be expanded into supported scalar ones.
2029 // TODO Handle scalar operations properly.
2030 return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
2031 case Intrinsic::masked_store:
2032 case Intrinsic::masked_load:
2033 case Intrinsic::masked_gather:
2034 case Intrinsic::masked_scatter:
2035 return !ST->hasMVEIntegerOps();
2036 case Intrinsic::sadd_with_overflow:
2037 case Intrinsic::uadd_with_overflow:
2038 case Intrinsic::ssub_with_overflow:
2039 case Intrinsic::usub_with_overflow:
2040 case Intrinsic::sadd_sat:
2041 case Intrinsic::uadd_sat:
2042 case Intrinsic::ssub_sat:
2043 case Intrinsic::usub_sat:
2044 return false;
2045 }
2046
2047 return BaseT::isLoweredToCall(F);
2048}
2049
2050bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
2051 unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
2052 EVT VT = TLI->getValueType(DL, I.getType(), true);
2053 if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
2054 return true;
2055
2056 // Check if an intrinsic will be lowered to a call and assume that any
2057 // other CallInst will generate a bl.
2058 if (auto *Call = dyn_cast<CallInst>(&I)) {
2059 if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
2060 switch(II->getIntrinsicID()) {
2061 case Intrinsic::memcpy:
2062 case Intrinsic::memset:
2063 case Intrinsic::memmove:
2064 return getNumMemOps(II) == -1;
2065 default:
2066 if (const Function *F = Call->getCalledFunction())
2067 return isLoweredToCall(F);
2068 }
2069 }
2070 return true;
2071 }
2072
2073 // FPv5 provides conversions between integer, double-precision,
2074 // single-precision, and half-precision formats.
2075 switch (I.getOpcode()) {
2076 default:
2077 break;
2078 case Instruction::FPToSI:
2079 case Instruction::FPToUI:
2080 case Instruction::SIToFP:
2081 case Instruction::UIToFP:
2082 case Instruction::FPTrunc:
2083 case Instruction::FPExt:
2084 return !ST->hasFPARMv8Base();
2085 }
2086
2087 // FIXME: Unfortunately the approach of checking the Operation Action does
2088 // not catch all cases of Legalization that use library calls. Our
2089 // Legalization step categorizes some transformations into library calls as
2090 // Custom, Expand or even Legal when doing type legalization. So for now
2091 // we have to special case for instance the SDIV of 64bit integers and the
2092 // use of floating point emulation.
2093 if (VT.isInteger() && VT.getSizeInBits() >= 64) {
2094 switch (ISD) {
2095 default:
2096 break;
2097 case ISD::SDIV:
2098 case ISD::UDIV:
2099 case ISD::SREM:
2100 case ISD::UREM:
2101 case ISD::SDIVREM:
2102 case ISD::UDIVREM:
2103 return true;
2104 }
2105 }
2106
2107 // Assume all other non-float operations are supported.
2108 if (!VT.isFloatingPoint())
2109 return false;
2110
2111 // We'll need a library call to handle most floats when using soft.
2112 if (TLI->useSoftFloat()) {
2113 switch (I.getOpcode()) {
2114 default:
2115 return true;
2116 case Instruction::Alloca:
2117 case Instruction::Load:
2118 case Instruction::Store:
2119 case Instruction::Select:
2120 case Instruction::PHI:
2121 return false;
2122 }
2123 }
2124
2125 // We'll need a libcall to perform double precision operations on a single
2126 // precision only FPU.
2127 if (I.getType()->isDoubleTy() && !ST->hasFP64())
2128 return true;
2129
2130 // Likewise for half precision arithmetic.
2131 if (I.getType()->isHalfTy() && !ST->hasFullFP16())
2132 return true;
2133
2134 return false;
2135}
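One of the special cases above, spelled out: 64-bit (and wider) integer division and remainder have no Arm instruction and are expanded to runtime helpers (__aeabi_ldivmod and friends are the usual names, mentioned only as an example), so they must be treated as calls when deciding whether a hardware loop is viable. A trivial sketch of that predicate:

#include <cassert>

// Sketch of the wide-division rule: any sdiv/udiv/srem/urem at 64 bits or
// more is assumed to become a library call on these targets.
bool wideDivisionIsLibcallSketch(unsigned BitWidth, bool IsDivOrRem) {
  return IsDivOrRem && BitWidth >= 64;
}

int main() {
  assert(wideDivisionIsLibcallSketch(64, true));
  assert(!wideDivisionIsLibcallSketch(32, true)); // 32-bit division may still
                                                  // be a call without hardware
                                                  // divide; caught elsewhere.
  return 0;
}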
2136
2137bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
2138 AssumptionCache &AC,
2139 TargetLibraryInfo *LibInfo,
2140 HardwareLoopInfo &HWLoopInfo) {
2141 // Low-overhead branches are only supported in the 'low-overhead branch'
2142 // extension of v8.1-m.
2143 if (!ST->hasLOB() || DisableLowOverheadLoops) {
2144 LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
2145 return false;
2146 }
2147
2148 if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
2149 LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
2150 return false;
2151 }
2152
2153 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2154 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
2155 LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
2156 return false;
2157 }
2158
2159 const SCEV *TripCountSCEV =
2160 SE.getAddExpr(BackedgeTakenCount,
2161 SE.getOne(BackedgeTakenCount->getType()));
2162
2163 // We need to store the trip count in LR, a 32-bit register.
2164 if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
2165 LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
2166 return false;
2167 }
2168
2169 // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
2170 // point in generating a hardware loop if that's going to happen.
2171
2172 auto IsHardwareLoopIntrinsic = [](Instruction &I) {
2173 if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
2174 switch (Call->getIntrinsicID()) {
2175 default:
2176 break;
2177 case Intrinsic::start_loop_iterations:
2178 case Intrinsic::test_start_loop_iterations:
2179 case Intrinsic::loop_decrement:
2180 case Intrinsic::loop_decrement_reg:
2181 return true;
2182 }
2183 }
2184 return false;
2185 };
2186
2187 // Scan the instructions to see if there's any that we know will turn into a
2188 // call or if this loop is already a low-overhead loop or will become a tail
2189 // predicated loop.
2190 bool IsTailPredLoop = false;
2191 auto ScanLoop = [&](Loop *L) {
2192 for (auto *BB : L->getBlocks()) {
2193 for (auto &I : *BB) {
2194 if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
2195 isa<InlineAsm>(I)) {
2196 LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
2197 return false;
2198 }
2199 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2200 IsTailPredLoop |=
2201 II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
2202 II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
2203 II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
2204 II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
2205 II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
2206 }
2207 }
2208 return true;
2209 };
2210
2211 // Visit inner loops.
2212 for (auto *Inner : *L)
2213 if (!ScanLoop(Inner))
2214 return false;
2215
2216 if (!ScanLoop(L))
2217 return false;
2218
2219 // TODO: Check whether the trip count calculation is expensive. If L is the
2220 // inner loop but we know it has a low trip count, calculating that trip
2221 // count (in the parent loop) may be detrimental.
2222
2223 LLVMContext &C = L->getHeader()->getContext();
2224 HWLoopInfo.CounterInReg = true;
2225 HWLoopInfo.IsNestingLegal = false;
2226 HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
2227 HWLoopInfo.CountType = Type::getInt32Ty(C);
2228 HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
2229 return true;
2230}
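The LR-width restriction above can be stated very simply once the SCEV machinery is stripped away: the trip count, i.e. the backedge-taken count plus one, has to fit in the 32-bit LR register. A plain-integer sketch of that check (the parameter stands in for the maximum of the SCEV's unsigned range):

#include <cassert>
#include <cstdint>

// Sketch of the 32-bit LR check used when forming low-overhead loops.
// BTC + 1 must be <= UINT32_MAX; written with '<' to avoid overflow.
bool tripCountFitsInLRSketch(uint64_t MaxBackedgeTakenCount) {
  return MaxBackedgeTakenCount < UINT32_MAX;
}

int main() {
  assert(tripCountFitsInLRSketch(1000));
  assert(!tripCountFitsInLRSketch(UINT64_C(0x100000000)));
  return 0;
}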
2231
2232static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
2233 // We don't allow icmp's, and because we only look at single block loops,
2234 // we simply count the icmps, i.e. there should only be 1 for the backedge.
2235 if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
2236 return false;
2237 // FIXME: This is a workaround for poor cost modelling. Min/Max intrinsics are
2238 // not currently canonical, but soon will be. Code without them uses icmp, and
2239 // so is not tail predicated as per the condition above. In order to get the
2240 // same performance we treat min and max the same as an icmp for tailpred
2241 // purposes for the moment (we often rely on non-tailpred and higher VF's to
2242 // pick more optimal instructions like VQDMULH. They need to be recognized
2243 // directly by the vectorizer).
2244 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2245 if ((II->getIntrinsicID() == Intrinsic::smin ||
2246 II->getIntrinsicID() == Intrinsic::smax ||
2247 II->getIntrinsicID() == Intrinsic::umin ||
2248 II->getIntrinsicID() == Intrinsic::umax) &&
2249 ++ICmpCount > 1)
2250 return false;
2251
2252 if (isa<FCmpInst>(&I))
2253 return false;
2254
2255 // We could allow extending/narrowing FP loads/stores, but codegen is
2256 // too inefficient so reject this for now.
2257 if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
2258 return false;
2259
2260 // Extends have to be extending-loads
2261 if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
2262 if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
2263 return false;
2264
2265 // Truncs have to be narrowing-stores
2266 if (isa<TruncInst>(&I) )
2267 if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
2268 return false;
2269
2270 return true;
2271}
2272
2273// To set up a tail-predicated loop, we need to know the total number of
2274// elements processed by that loop. Thus, we need to determine the element
2275// size and:
2276// 1) it should be uniform for all operations in the vector loop, so we
2277// e.g. don't want any widening/narrowing operations.
2278// 2) it should be smaller than i64s because we don't have vector operations
2279// that work on i64s.
2280// 3) we don't want elements to be reversed or shuffled, to make sure the
2281// tail-predication masks/predicates the right lanes.
2282//
2283static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
2284 const DataLayout &DL,
2285 const LoopAccessInfo *LAI) {
2286 LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
2287
2288 // If there are live-out values, it is probably a reduction. We can predicate
2289 // most reduction operations freely under MVE using a combination of
2290 // prefer-predicated-reduction-select and inloop reductions. We limit this to
2291 // floating point and integer reductions, but don't check for operators
2292 // specifically here. If the value ends up not being a reduction (and so the
2293 // vectorizer cannot tailfold the loop), we should fall back to standard
2294 // vectorization automatically.
2295 SmallVector<Instruction *, 8> LiveOuts;
2296 LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
2297 bool ReductionsDisabled =
2298 EnableTailPredication == TailPredication::EnabledNoReductions ||
2299 EnableTailPredication == TailPredication::ForceEnabledNoReductions;
2300
2301 for (auto *I : LiveOuts) {
2302 if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
2303 !I->getType()->isHalfTy()) {
2304 LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
2305 "live-out value\n");
2306 return false;
2307 }
2308 if (ReductionsDisabled) {
2309 LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
2310 return false;
2311 }
2312 }
2313
2314 // Next, check that all instructions can be tail-predicated.
2315 PredicatedScalarEvolution PSE = LAI->getPSE();
2317 int ICmpCount = 0;
2318
2319 for (BasicBlock *BB : L->blocks()) {
2320 for (Instruction &I : BB->instructionsWithoutDebug()) {
2321 if (isa<PHINode>(&I))
2322 continue;
2323 if (!canTailPredicateInstruction(I, ICmpCount)) {
2324 LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
2325 return false;
2326 }
2327
2328 Type *T = I.getType();
2329 if (T->getScalarSizeInBits() > 32) {
2330 LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
2331 return false;
2332 }
2333 if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
2334 Value *Ptr = getLoadStorePointerOperand(&I);
2335 Type *AccessTy = getLoadStoreType(&I);
2336 int64_t NextStride = getPtrStride(PSE, AccessTy, Ptr, L).value_or(0);
2337 if (NextStride == 1) {
2338 // TODO: for now only allow consecutive strides of 1. We could support
2339 // other strides as long as it is uniform, but let's keep it simple
2340 // for now.
2341 continue;
2342 } else if (NextStride == -1 ||
2343 (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
2344 (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
2345 LLVM_DEBUG(dbgs()
2346 << "Consecutive strides of 2 found, vld2/vstr2 can't "
2347 "be tail-predicated.\n");
2348 return false;
2349 // TODO: don't tail predicate if there is a reversed load?
2350 } else if (EnableMaskedGatherScatters) {
2351 // Gather/scatters do allow loading from arbitrary strides, at
2352 // least if they are loop invariant.
2353 // TODO: Loop variant strides should in theory work, too, but
2354 // this requires further testing.
2355 const SCEV *PtrScev = PSE.getSE()->getSCEV(Ptr);
2356 if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
2357 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
2358 if (PSE.getSE()->isLoopInvariant(Step, L))
2359 continue;
2360 }
2361 }
2362 LLVM_DEBUG(dbgs() << "Bad stride found, can't "
2363 "tail-predicate.\n");
2364 return false;
2365 }
2366 }
2367 }
2368
2369 LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
2370 return true;
2371}
2372
2373bool ARMTTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
2374 if (!EnableTailPredication) {
2375 LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
2376 return false;
2377 }
2378
2379 // Creating a predicated vector loop is the first step for generating a
2380 // tail-predicated hardware loop, for which we need the MVE masked
2381 // load/stores instructions:
2382 if (!ST->hasMVEIntegerOps())
2383 return false;
2384
2385 LoopVectorizationLegality *LVL = TFI->LVL;
2386 Loop *L = LVL->getLoop();
2387
2388 // For now, restrict this to single block loops.
2389 if (L->getNumBlocks() > 1) {
2390 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
2391 "loop.\n");
2392 return false;
2393 }
2394
2395 assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
2396
2397 LoopInfo *LI = LVL->getLoopInfo();
2398 HardwareLoopInfo HWLoopInfo(L);
2399 if (!HWLoopInfo.canAnalyze(*LI)) {
2400 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2401 "analyzable.\n");
2402 return false;
2403 }
2404
2405 AssumptionCache *AC = LVL->getAssumptionCache();
2406 ScalarEvolution *SE = LVL->getScalarEvolution();
2407
2408 // This checks if we have the low-overhead branch architecture
2409 // extension, and if we will create a hardware-loop:
2410 if (!isHardwareLoopProfitable(L, *SE, *AC, TFI->TLI, HWLoopInfo)) {
2411 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2412 "profitable.\n");
2413 return false;
2414 }
2415
2416 DominatorTree *DT = LVL->getDominatorTree();
2417 if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT)) {
2418 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2419 "a candidate.\n");
2420 return false;
2421 }
2422
2423 return canTailPredicateLoop(L, LI, *SE, DL, LVL->getLAI());
2424}
2425
2426TailFoldingStyle
2427ARMTTIImpl::getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
2428 if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2430
2431 // Intrinsic @llvm.get.active.lane.mask is supported.
2432 // It is used in the MVETailPredication pass, which requires the number of
2433 // elements processed by this vector loop to setup the tail-predicated
2434 // loop.
2436}
2437void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2438 TTI::UnrollingPreferences &UP,
2439 OptimizationRemarkEmitter *ORE) {
2440 // Enable Upper bound unrolling universally, providing that we do not see an
2441 // active lane mask, which will be better kept as a loop to become tail
2442 // predicated than to be conditionally unrolled.
2443 UP.UpperBound =
2444 !ST->hasMVEIntegerOps() || !any_of(*L->getHeader(), [](Instruction &I) {
2445 return isa<IntrinsicInst>(I) &&
2446 cast<IntrinsicInst>(I).getIntrinsicID() ==
2447 Intrinsic::get_active_lane_mask;
2448 });
2449
2450 // Only currently enable these preferences for M-Class cores.
2451 if (!ST->isMClass())
2452 return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
2453
2454 // Disable loop unrolling for Oz and Os.
2455 UP.OptSizeThreshold = 0;
2456 UP.PartialOptSizeThreshold = 0;
2457 if (L->getHeader()->getParent()->hasOptSize())
2458 return;
2459
2460 SmallVector<BasicBlock*, 4> ExitingBlocks;
2461 L->getExitingBlocks(ExitingBlocks);
2462 LLVM_DEBUG(dbgs() << "Loop has:\n"
2463 << "Blocks: " << L->getNumBlocks() << "\n"
2464 << "Exit blocks: " << ExitingBlocks.size() << "\n");
2465
2466 // Only allow one exit other than the latch. This acts as an early exit
2467 // as it mirrors the profitability calculation of the runtime unroller.
2468 if (ExitingBlocks.size() > 2)
2469 return;
2470
2471 // Limit the CFG of the loop body for targets with a branch predictor.
2472 // Allowing 4 blocks permits if-then-else diamonds in the body.
2473 if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2474 return;
2475
2476 // Don't unroll vectorized loops, including the remainder loop
2477 if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2478 return;
2479
2480 // Scan the loop: don't unroll loops with calls as this could prevent
2481 // inlining.
2482 InstructionCost Cost = 0;
2483 for (auto *BB : L->getBlocks()) {
2484 for (auto &I : *BB) {
2485 // Don't unroll vectorised loop. MVE does not benefit from it as much as
2486 // scalar code.
2487 if (I.getType()->isVectorTy())
2488 return;
2489
2490 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2491 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2492 if (!isLoweredToCall(F))
2493 continue;
2494 }
2495 return;
2496 }
2497
2498 SmallVector<const Value*, 4> Operands(I.operand_values());
2499 Cost += getInstructionCost(&I, Operands,
2500 TargetTransformInfo::TCK_SizeAndLatency);
2501 }
2502 }
2503
2504 // On v6m cores, there are very few registers available. We can easily end up
2505 // spilling and reloading more registers in an unrolled loop. Look at the
2506 // number of LCSSA phis as a rough measure of how many registers will need to
2507 // be live out of the loop, reducing the default unroll count if more than 1
2508 // value is needed. In the long run, all of this should be being learnt by a
2509 // machine.
2510 unsigned UnrollCount = 4;
2511 if (ST->isThumb1Only()) {
2512 unsigned ExitingValues = 0;
2513 SmallVector<BasicBlock*, 4> ExitBlocks;
2514 L->getExitBlocks(ExitBlocks);
2515 for (auto *Exit : ExitBlocks) {
2516 // Count the number of LCSSA phis. Exclude values coming from GEP's as
2517 // only the last is expected to be needed for address operands.
2518 unsigned LiveOuts = count_if(Exit->phis(), [](auto &PH) {
2519 return PH.getNumOperands() != 1 ||
2520 !isa<GetElementPtrInst>(PH.getOperand(0));
2521 });
2522 ExitingValues = ExitingValues < LiveOuts ? LiveOuts : ExitingValues;
2523 }
2524 if (ExitingValues)
2525 UnrollCount /= ExitingValues;
2526 if (UnrollCount <= 1)
2527 return;
2528 }
2529
2530 LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2531 LLVM_DEBUG(dbgs() << "Default Runtime Unroll Count: " << UnrollCount << "\n");
2532
2533 UP.Partial = true;
2534 UP.Runtime = true;
2535 UP.UnrollRemainder = true;
2536 UP.DefaultUnrollRuntimeCount = UnrollCount;
2537 UP.UnrollAndJam = true;
2539
2540 // Force unrolling of small loops can be very useful because of the
2541 // branch-taken cost of the backedge.
2542 if (Cost < 12)
2543 UP.Force = true;
2544}
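For the Thumb1 heuristic above, the arithmetic is small enough to show directly: the default runtime unroll count of 4 is divided by the largest number of live-out (LCSSA) values seen across the exit blocks, and unrolling is abandoned once that quotient drops to 1 or below. A standalone sketch:

#include <cassert>

// Sketch of the v6-M live-out heuristic; returning 0 means "do not unroll".
unsigned thumb1UnrollCountSketch(unsigned MaxLiveOutsPerExit) {
  unsigned UnrollCount = 4;
  if (MaxLiveOutsPerExit)
    UnrollCount /= MaxLiveOutsPerExit;
  return UnrollCount <= 1 ? 0 : UnrollCount;
}

int main() {
  assert(thumb1UnrollCountSketch(0) == 4); // no live-outs: keep the default
  assert(thumb1UnrollCountSketch(2) == 2); // two live-outs: halve the count
  assert(thumb1UnrollCountSketch(4) == 0); // too many values live out
  return 0;
}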
2545
2546void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2547 TTI::PeelingPreferences &PP) {
2548 BaseT::getPeelingPreferences(L, SE, PP);
2549}
2550
2551bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2552 TTI::ReductionFlags Flags) const {
2553 if (!ST->hasMVEIntegerOps())
2554 return false;
2555
2556 unsigned ScalarBits = Ty->getScalarSizeInBits();
2557 switch (Opcode) {
2558 case Instruction::Add:
2559 return ScalarBits <= 64;
2560 default:
2561 return false;
2562 }
2563}
2564
2565bool ARMTTIImpl::preferPredicatedReductionSelect(
2566 unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2567 if (!ST->hasMVEIntegerOps())
2568 return false;
2569 return true;
2570}
2571
2572InstructionCost ARMTTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
2573 int64_t BaseOffset,
2574 bool HasBaseReg, int64_t Scale,
2575 unsigned AddrSpace) const {
2576 TargetLoweringBase::AddrMode AM;
2577 AM.BaseGV = BaseGV;
2578 AM.BaseOffs = BaseOffset;
2579 AM.HasBaseReg = HasBaseReg;
2580 AM.Scale = Scale;
2581 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace)) {
2582 if (ST->hasFPAO())
2583 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
2584 return 0;
2585 }
2586 return -1;
2587}
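Summarizing the addressing-mode costing above as a standalone sketch: an illegal mode is reported as -1, a legal one is free, and on cores with the FPAO feature a negative scale is penalized by one unit because positive offsets execute faster. The parameters are assumptions standing in for the real legality query:

#include <cassert>

// Sketch of getScalingFactorCost's decision tree.
int scalingFactorCostSketch(bool LegalAddressingMode, bool HasFPAO,
                            long long Scale) {
  if (!LegalAddressingMode)
    return -1;
  if (HasFPAO)
    return Scale < 0 ? 1 : 0; // positive offsets execute faster
  return 0;
}

int main() {
  assert(scalingFactorCostSketch(true, true, -4) == 1);
  assert(scalingFactorCostSketch(true, false, -4) == 0);
  assert(scalingFactorCostSketch(false, true, 2) == -1);
  return 0;
}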
2588
2589bool ARMTTIImpl::hasArmWideBranch(bool Thumb) const {
2590 if (Thumb) {
2591 // B.W is available in any Thumb2-supporting target, and also in every
2592 // version of Armv8-M, even Baseline which does not include the rest of
2593 // Thumb2.
2594 return ST->isThumb2() || ST->hasV8MBaselineOps();
2595 } else {
2596 // B is available in all versions of the Arm ISA, so the only question is
2597 // whether that ISA is available at all.
2598 return ST->hasARMOps();
2599 }
2600}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
cl::opt< unsigned > MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden, cl::desc("Maximum interleave factor for MVE VLDn to generate."), cl::init(2))
static Value * isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm)
static cl::opt< bool > AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true), cl::desc("Enable the generation of WLS loops"))
static Value * simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign, InstCombiner::BuilderTy &Builder)
Convert a vector load intrinsic into a simple llvm load instruction.
static bool isFPSatMinMaxPattern(Instruction *Inst, const APInt &Imm)
cl::opt< bool > EnableMaskedGatherScatters
static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount)
cl::opt< unsigned > MVEMaxSupportedInterleaveFactor
cl::opt< TailPredication::Mode > EnableTailPredication
static cl::opt< bool > DisableLowOverheadLoops("disable-arm-loloops", cl::Hidden, cl::init(false), cl::desc("Disable the generation of low-overhead loops"))
static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE, const DataLayout &DL, const LoopAccessInfo *LAI)
static cl::opt< bool > EnableMaskedLoadStores("enable-arm-maskedldst", cl::Hidden, cl::init(true), cl::desc("Enable the generation of masked loads and stores"))
This file a TargetTransformInfo::Concept conforming object specific to the ARM target machine.
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Cost tables and simple lookup functions.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
Hexagon Common GEP
This file provides the interface for the instcombine pass implementation.
static cl::opt< unsigned > UnrollCount("unroll-count", cl::Hidden, cl::desc("Use this unroll count for all loops including those with " "unroll_count pragma values, for testing purposes"))
This file defines the LoopVectorizationLegality class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
if(VerifyEach)
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:76
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1439
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition: APInt.cpp:620
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:274
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:217
bool hasARMOps() const
Definition: ARMSubtarget.h:335
bool isThumb1Only() const
Definition: ARMSubtarget.h:434
bool hasFPARMv8Base() const
Definition: ARMSubtarget.h:344
bool isThumb2() const
Definition: ARMSubtarget.h:435
bool hasVFP2Base() const
Definition: ARMSubtarget.h:341
bool isMClass() const
Definition: ARMSubtarget.h:436
unsigned getMVEVectorCostFactor(TargetTransformInfo::TargetCostKind CostKind) const
Definition: ARMSubtarget.h:559
bool preferInLoopReduction(unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const
InstructionCost getAddressComputationCost(Type *Val, ScalarEvolution *SE, const SCEV *Ptr)
bool maybeLoweredToCall(Instruction &I)
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
bool isLegalMaskedStore(Type *DataTy, Align Alignment)
bool isLegalMaskedLoad(Type *DataTy, Align Alignment)
InstructionCost getMemcpyCost(const Instruction *I)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt)
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
bool isLoweredToCall(const Function *F)
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *ValTy, TTI::TargetCostKind CostKind)
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
int getNumMemOps(const IntrinsicInst *I) const
Given a memcpy/memset/memmove instruction, return the number of memory operations performed,...
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
bool hasArmWideBranch(bool Thumb) const
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI)
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
bool isLegalMaskedGather(Type *Ty, Align Alignment)
bool areInlineCompatible(const Function *Caller, const Function *Callee) const
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM.
TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty)
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, FastMathFlags FMF, TTI::TargetCostKind CostKind)
bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
bool isProfitableLSRChainElement(Instruction *I)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy, Align Alignment, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
bool useSoftFloat() const override
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
Definition: BasicTTIImpl.h:582
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
Definition: BasicTTIImpl.h:891
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const
Definition: BasicTTIImpl.h:969
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt)
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *, const SCEV *)
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instruction.
Definition: BasicTTIImpl.h:762
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
Definition: BasicTTIImpl.h:654
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Compute a cost of the given call instruction.
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
Definition: BasicTTIImpl.h:855
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0)
Definition: BasicTTIImpl.h:339
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1654
unsigned arg_size() const
Definition: InstrTypes.h:1652
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1329
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:960
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:990
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:987
static ConstantAsMetadata * get(Constant *C)
Definition: Metadata.h:528
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:145
This is an important base class in LLVM.
Definition: Constant.h:41
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:672
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1801
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1214
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition: IRBuilder.h:460
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:932
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2105
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
The core instruction combiner logic.
Definition: InstCombiner.h:47
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:340
virtual Instruction * eraseInstFromFunction(Instruction &I)=0
Combiner aware instruction erasure.
DominatorTree & getDominatorTree() const
Definition: InstCombiner.h:339
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:385
virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo, const APInt &DemandedMask, KnownBits &Known, unsigned Depth=0)=0
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:409
BuilderTy & Builder
Definition: InstCombiner.h:60
AssumptionCache & getAssumptionCache() const
Definition: InstCombiner.h:337
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:149
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:359
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1636
bool isShift() const
Definition: Instruction.h:259
const SmallVectorImpl< Type * > & getArgTypes() const
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Drive the analysis of memory accesses in the loop.
const PredicatedScalarEvolution & getPSE() const
Used to add runtime SCEV checks.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
AssumptionCache * getAssumptionCache() const
const LoopAccessInfo * getLAI() const
ScalarEvolution * getScalarEvolution() const
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
Root of the metadata hierarchy.
Definition: Metadata.h:62
The optimization diagnostic interface.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
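A minimal sketch using the ScalarEvolution queries above (assuming SE and L are in scope, as in a typical hardware-loop legality check): fetch the backedge-taken count and, when it is computable, form TripCount = BTC + 1.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

// Hypothetical helper: return the loop trip count as a SCEV, or nullptr.
static const SCEV *computeTripCount(ScalarEvolution &SE, const Loop *L) {
  if (!SE.hasLoopInvariantBackedgeTakenCount(L))
    return nullptr;
  const SCEV *BTC = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BTC))
    return nullptr;
  // The trip count is one more than the number of backedges taken.
  return SE.getAddExpr(BTC, SE.getOne(BTC->getType()));
}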
size_t size() const
Definition: SmallVector.h:91
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Provides information about what library functions are available for the current target.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const TargetMachine & getTargetMachine() const
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
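A minimal sketch of the usual cost-model legality probe built from the queries above (the helper name is invented): map the IR opcode to its ISD node and ask whether the operation is legal, custom, or promotable for the value's EVT.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

static bool isSupportedOnTarget(const TargetLoweringBase &TLI,
                                const DataLayout &DL, unsigned Opcode,
                                Type *Ty) {
  int ISD = TLI.InstructionOpcodeToISD(Opcode);
  EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
  // Only simple (non-extended) types have a meaningful legality answer here.
  if (!ISD || !VT.isSimple())
    return false;
  return TLI.isOperationLegalOrCustomOrPromote(ISD, VT);
}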
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance) const
bool isLoweredToCall(const Function *F) const
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind)
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
@ TCC_Expensive
The cost of a 'div' instruction on x86.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:154
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition: Type.h:143
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
user_iterator user_begin()
Definition: Value.h:397
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
Type * getElementType() const
Definition: DerivedTypes.h:436
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immed...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting an 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
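A minimal sketch of the kind of immediate-materialization test a cost model makes with these helpers (assumptions: both getSOImmVal and getT2SOImmVal return -1 when the value does not encode, and the header is the target-internal MCTargetDesc/ARMAddressingModes.h; the policy below is illustrative, not the exact logic of this file).

#include "MCTargetDesc/ARMAddressingModes.h"
using namespace llvm;

// Illustrative only: is a 32-bit constant directly encodable as an immediate?
static bool isCheapImmediate(unsigned Imm, bool IsThumb2, bool IsThumb1) {
  if (IsThumb2)
    return ARM_AM::getT2SOImmVal(Imm) != -1; // Thumb-2 modified immediate
  if (IsThumb1)
    return ARM_AM::isThumbImmShiftedVal(Imm); // shifted 8-bit immediate
  return ARM_AM::getSOImmVal(Imm) != -1;      // ARM modified immediate
}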
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:790
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:774
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:705
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:600
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:780
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:674
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:888
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:836
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:680
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:869
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:786
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:821
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:163
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:561
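A minimal free-standing sketch of these combinators (not a pattern this file actually looks for): test whether V computes (X ^ C) + X for some ConstantInt C, first binding X and C, then re-matching with m_Specific to pin the repeated operand.

#include "llvm/IR/Constants.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;

static bool isXorPlusSelf(Value *V) {
  using namespace PatternMatch;
  Value *X = nullptr;
  ConstantInt *C = nullptr;
  // First pass binds X and C; the add's other operand is left unconstrained.
  if (!match(V, m_c_Add(m_Xor(m_Value(X), m_ConstantInt(C)), m_Value())))
    return false;
  // Second pass requires that other operand to be the same X.
  return match(V, m_c_Add(m_Xor(m_Specific(X), m_Specific(C)), m_Specific(X)));
}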
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
const CostTblEntryT< CostType > * CostTableLookup(ArrayRef< CostTblEntryT< CostType > > Tbl, int ISD, MVT Ty)
Find in cost table.
Definition: CostTable.h:35
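A minimal sketch of the lookup pattern this function supports (the table contents and the default below are invented for illustration): probe a static per-ISD, per-MVT table and fall back to a default cost when no entry matches.

#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
using namespace llvm;

static unsigned lookupExampleCost(int ISD, MVT VT) {
  static const CostTblEntry ExampleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
  };
  if (const auto *Entry = CostTableLookup(ExampleTbl, ISD, VT))
    return Entry->Cost;
  return 4; // invented default when the table has no entry
}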
bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name)
Returns true if Name is applied to TheLoop and enabled.
Definition: LoopInfo.cpp:1085
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:241
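A minimal sketch (a hypothetical helper) of the usual use of getKnownAlignment: check whether a pointer's provable alignment already satisfies what an access needs, letting llvm.assume facts and dominating context raise the answer.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

static bool hasAlignmentAtLeast(Value *Ptr, Align Required,
                                const DataLayout &DL, const Instruction *CtxI,
                                AssumptionCache *AC, const DominatorTree *DT) {
  // AC and DT are optional; passing them can only improve the known alignment.
  return getKnownAlignment(Ptr, DL, CtxI, AC, DT) >= Required;
}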
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
SmallVector< Instruction *, 8 > findDefsUsedOutsideOfLoop(Loop *L)
Returns the instructions that use values defined in the loop.
Definition: LoopUtils.cpp:123
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
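A minimal sketch of consuming matchSelectPattern (the helper name is invented): classify a select as a min/max/abs idiom and branch on the reported flavor.

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

static bool isUnsignedMinMax(Value *V) {
  Value *LHS, *RHS;
  SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
  // Flavor is SPF_UNKNOWN when no min/max/abs idiom was recognized.
  return SPR.Flavor == SPF_UMIN || SPR.Flavor == SPF_UMAX;
}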
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
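A minimal sketch of a unit-stride check in the style of gather/scatter legality queries (the helper and its use are illustrative): a load or store is treated as consecutive if getPtrStride reports a stride of +1 or -1 access-type units.

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool isConsecutiveAccess(Instruction *I, const Loop *L,
                                PredicatedScalarEvolution &PSE) {
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *AccessTy = getLoadStoreType(I);
  if (!Ptr || !AccessTy)
    return false;
  // std::nullopt means the stride is not a provable compile-time constant.
  std::optional<int64_t> Stride = getPtrStride(PSE, AccessTy, Ptr, L);
  return Stride && (*Stride == 1 || *Stride == -1);
}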
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1930
bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
InstructionCost Cost
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
const TypeConversionCostTblEntryT< CostType > * ConvertCostTableLookup(ArrayRef< TypeConversionCostTblEntryT< CostType > > Tbl, int ISD, MVT Dst, MVT Src)
Find in type conversion cost table.
Definition: CostTable.h:66
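The conversion-cost variant follows the same pattern as the CostTableLookup sketch above, but keys on a (destination, source) MVT pair; a minimal sketch with an invented table:

#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
using namespace llvm;

static unsigned lookupExampleCastCost(int ISD, MVT Dst, MVT Src) {
  static const TypeConversionCostTblEntry ExampleTbl[] = {
      {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1},
      {ISD::FP_EXTEND,   MVT::v4f32, MVT::v4f16, 2},
  };
  if (const auto *Entry = ConvertCostTableLookup(ExampleTbl, ISD, Dst, Src))
    return Entry->Cost;
  return 4; // invented default when the table has no entry
}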
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Cost Table Entry.
Definition: CostTable.h:25
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:146
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:370
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
bool isFixedLengthVector() const
Definition: ValueTypes.h:177
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:167
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:313
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:318
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:326
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:151
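A minimal sketch of the kind of EVT interrogation done when sizing vector operations (the helper name and the 128-bit register width convention are stated assumptions): count how many 128-bit units a simple fixed-length vector type occupies.

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

static unsigned numQuadUnitsFor(EVT VT) {
  if (!VT.isSimple() || !VT.isFixedLengthVector())
    return 0;
  // Total width = element count * element width; round up to 128-bit units.
  uint64_t Bits = VT.getVectorNumElements() * VT.getScalarSizeInBits();
  return unsigned((Bits + 127) / 128);
}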
Attributes of a target dependent hardware loop.
bool canAnalyze(LoopInfo &LI)
bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)
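A minimal sketch of how these HardwareLoopInfo queries are typically driven (the analysis references are assumed to be in scope): bail out early if the loop cannot be analyzed, then ask whether it qualifies as a hardware-loop candidate.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

static bool canMakeHardwareLoop(Loop *L, ScalarEvolution &SE, LoopInfo &LI,
                                DominatorTree &DT) {
  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(LI))
    return false;
  // On success this also fills in the trip count and counter fields.
  return HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT);
}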
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
SelectPatternFlavor Flavor
TargetLibraryInfo * TLI
LoopVectorizationLegality * LVL
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
Flags describing the kind of vector reduction.
Parameters that control the generic loop unrolling transformation.
bool UpperBound
Allow using trip count upper bound to unroll loops.
bool Force
Apply loop unroll on any kind of loop (mainly to loops that fail runtime unrolling).
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
unsigned DefaultUnrollRuntimeCount
Default unroll count for loops with run-time trip count.
unsigned UnrollAndJamInnerLoopThreshold
Threshold for unroll and jam, for inner loop size.
bool UnrollAndJam
Allow unroll and jam. Used to enable unroll and jam for the target.
bool UnrollRemainder
Allow unrolling of all the iterations of the runtime loop remainder.
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
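A minimal sketch (all values invented) of how a getUnrollingPreferences-style hook fills the fields above: allow runtime and partial unrolling with a modest default count, and disable unrolling when optimizing for size.

#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;

static void tunePreferencesSketch(TargetTransformInfo::UnrollingPreferences &UP) {
  UP.Runtime = true;                // permit runtime-trip-count unrolling
  UP.Partial = true;                // permit partial unrolling
  UP.UpperBound = true;             // may use the trip-count upper bound
  UP.UnrollRemainder = true;        // unroll the runtime remainder loop
  UP.DefaultUnrollRuntimeCount = 4; // invented default count
  UP.OptSizeThreshold = 0;          // no unrolling under -Os/-Oz
  UP.PartialOptSizeThreshold = 0;
}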
Type Conversion Cost Table.
Definition: CostTable.h:55