// Doxygen-rendered listing of llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
// from LLVM 23.0.0git. The lines "LLVM 23.0.0git" / "Go to the documentation of
// this file." are website navigation text, not part of the source file.
1//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "AArch64ExpandImm.h"
14#include "llvm/ADT/DenseMap.h"
22#include "llvm/IR/Intrinsics.h"
23#include "llvm/IR/IntrinsicsAArch64.h"
25#include "llvm/Support/Debug.h"
30#include <algorithm>
31#include <optional>
32using namespace llvm;
33using namespace llvm::PatternMatch;
34
35#define DEBUG_TYPE "aarch64tti"
36
// Command-line knobs tuning the AArch64 TTI cost model and loop-unrolling
// heuristics. NOTE(review): this is a Doxygen-extracted listing; several
// cl::opt declaration header lines (original lines 40, 44, 52, 54, 56, 58,
// 61, 72, 76 and 80) were elided by the extraction, so some statements below
// are visibly truncated — consult the full source before editing.
37static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
38 cl::init(true), cl::Hidden);
39
41 "sve-prefer-fixed-over-scalable-if-equal", cl::Hidden);
42
43static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
45
46static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
47 cl::init(10), cl::Hidden);
48
49static cl::opt<unsigned> SVETailFoldInsnThreshold("sve-tail-folding-insn-threshold",
50 cl::init(15), cl::Hidden);
51
53 NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10),
55
57 "call-penalty-sm-change", cl::init(5), cl::Hidden,
59 "Penalty of calling a function that requires a change to PSTATE.SM"));
60
62 "inline-call-penalty-sm-change", cl::init(10), cl::Hidden,
63 cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"));
64
65static cl::opt<bool> EnableOrLikeSelectOpt("enable-aarch64-or-like-select",
66 cl::init(true), cl::Hidden);
67
68static cl::opt<bool> EnableLSRCostOpt("enable-aarch64-lsr-cost-opt",
69 cl::init(true), cl::Hidden);
70
71// A complete guess as to a reasonable cost.
73 BaseHistCntCost("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden,
74 cl::desc("The cost of a histcnt instruction"));
75
77 "dmb-lookahead-threshold", cl::init(10), cl::Hidden,
78 cl::desc("The number of instructions to search for a redundant dmb"));
79
81 "aarch64-force-unroll-threshold", cl::init(0), cl::Hidden,
82 cl::desc("Threshold for forced unrolling of small loops in AArch64"));
83
84namespace {
85class TailFoldingOption {
86 // These bitfields will only ever be set to something non-zero in operator=,
87 // when setting the -sve-tail-folding option. This option should always be of
88 // the form (default|simple|all|disable)[+(Flag1|Flag2|etc)], where here
89 // InitialBits is one of (disabled|all|simple). EnableBits represents
90 // additional flags we're enabling, and DisableBits for those flags we're
91 // disabling. The default flag is tracked in the variable NeedsDefault, since
92 // at the time of setting the option we may not know what the default value
93 // for the CPU is.
97
98 // This value needs to be initialised to true in case the user does not
99 // explicitly set the -sve-tail-folding option.
100 bool NeedsDefault = true;
101
102 void setInitialBits(TailFoldingOpts Bits) { InitialBits = Bits; }
103
104 void setNeedsDefault(bool V) { NeedsDefault = V; }
105
106 void setEnableBit(TailFoldingOpts Bit) {
107 EnableBits |= Bit;
108 DisableBits &= ~Bit;
109 }
110
111 void setDisableBit(TailFoldingOpts Bit) {
112 EnableBits &= ~Bit;
113 DisableBits |= Bit;
114 }
115
116 TailFoldingOpts getBits(TailFoldingOpts DefaultBits) const {
117 TailFoldingOpts Bits = TailFoldingOpts::Disabled;
118
119 assert((InitialBits == TailFoldingOpts::Disabled || !NeedsDefault) &&
120 "Initial bits should only include one of "
121 "(disabled|all|simple|default)");
122 Bits = NeedsDefault ? DefaultBits : InitialBits;
123 Bits |= EnableBits;
124 Bits &= ~DisableBits;
125
126 return Bits;
127 }
128
129 void reportError(std::string Opt) {
130 errs() << "invalid argument '" << Opt
131 << "' to -sve-tail-folding=; the option should be of the form\n"
132 " (disabled|all|default|simple)[+(reductions|recurrences"
133 "|reverse|noreductions|norecurrences|noreverse)]\n";
134 report_fatal_error("Unrecognised tail-folding option");
135 }
136
137public:
138
139 void operator=(const std::string &Val) {
140 // If the user explicitly sets -sve-tail-folding= then treat as an error.
141 if (Val.empty()) {
142 reportError("");
143 return;
144 }
145
146 // Since the user is explicitly setting the option we don't automatically
147 // need the default unless they require it.
148 setNeedsDefault(false);
149
150 SmallVector<StringRef, 4> TailFoldTypes;
151 StringRef(Val).split(TailFoldTypes, '+', -1, false);
152
153 unsigned StartIdx = 1;
154 if (TailFoldTypes[0] == "disabled")
155 setInitialBits(TailFoldingOpts::Disabled);
156 else if (TailFoldTypes[0] == "all")
157 setInitialBits(TailFoldingOpts::All);
158 else if (TailFoldTypes[0] == "default")
159 setNeedsDefault(true);
160 else if (TailFoldTypes[0] == "simple")
161 setInitialBits(TailFoldingOpts::Simple);
162 else {
163 StartIdx = 0;
164 setInitialBits(TailFoldingOpts::Disabled);
165 }
166
167 for (unsigned I = StartIdx; I < TailFoldTypes.size(); I++) {
168 if (TailFoldTypes[I] == "reductions")
169 setEnableBit(TailFoldingOpts::Reductions);
170 else if (TailFoldTypes[I] == "recurrences")
171 setEnableBit(TailFoldingOpts::Recurrences);
172 else if (TailFoldTypes[I] == "reverse")
173 setEnableBit(TailFoldingOpts::Reverse);
174 else if (TailFoldTypes[I] == "noreductions")
175 setDisableBit(TailFoldingOpts::Reductions);
176 else if (TailFoldTypes[I] == "norecurrences")
177 setDisableBit(TailFoldingOpts::Recurrences);
178 else if (TailFoldTypes[I] == "noreverse")
179 setDisableBit(TailFoldingOpts::Reverse);
180 else
181 reportError(Val);
182 }
183 }
184
185 bool satisfies(TailFoldingOpts DefaultBits, TailFoldingOpts Required) const {
186 return (getBits(DefaultBits) & Required) == Required;
187 }
188};
189} // namespace
190
// Singleton instance that -sve-tail-folding parses into (via the elided
// cl::opt<std::string, true, ...> declaration at original line 193).
191TailFoldingOption TailFoldingOptionLoc;
192
194 "sve-tail-folding",
195 cl::desc(
196 "Control the use of vectorisation using tail-folding for SVE where the"
197 " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:"
198 "\ndisabled (Initial) No loop types will vectorize using "
199 "tail-folding"
200 "\ndefault (Initial) Uses the default tail-folding settings for "
201 "the target CPU"
202 "\nall (Initial) All legal loop types will vectorize using "
203 "tail-folding"
204 "\nsimple (Initial) Use tail-folding for simple loops (not "
205 "reductions or recurrences)"
206 "\nreductions Use tail-folding for loops containing reductions"
207 "\nnoreductions Inverse of above"
208 "\nrecurrences Use tail-folding for loops containing fixed order "
209 "recurrences"
210 "\nnorecurrences Inverse of above"
211 "\nreverse Use tail-folding for loops requiring reversed "
212 "predicates"
213 "\nnoreverse Inverse of above"),
215
216// Experimental option that will only be fully functional when the
217// code-generator is changed to use SVE instead of NEON for all fixed-width
218// operations.
220 "enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
221
222// Experimental option that will only be fully functional when the cost-model
223// and code-generator have been changed to avoid using scalable vector
224// instructions that are not legal in streaming SVE mode.
226 "enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
227
// Returns true if CI is a direct call to an SME support-routine (ABI)
// function. NOTE(review): the second operand of the '&&' (original line 232,
// the predicate actually applied to F) was elided by the extraction —
// confirm against the full source before relying on the exact test.
228static bool isSMEABIRoutineCall(const CallInst &CI,
229 const AArch64TargetLowering &TLI) {
230 const auto *F = CI.getCalledFunction();
231 return F &&
233}
234
235/// Returns true if the function has explicit operations that can only be
236/// lowered using incompatible instructions for the selected mode. This also
237/// returns true if the function F may use or modify ZA state.
// NOTE(review): the function signature line (original 238) and the third
// operand of the '||' (original 248, presumably an isSMEABIRoutineCall
// check — verify) were elided by the extraction.
239 const AArch64TargetLowering &TLI) {
240 for (const BasicBlock &BB : *F) {
241 for (const Instruction &I : BB) {
242 // Be conservative for now and assume that any call to inline asm or to
243 // intrinsics could result in non-streaming ops (e.g. calls to
244 // @llvm.aarch64.* or @llvm.gather/scatter intrinsics). We can assume that
245 // all native LLVM instructions can be lowered to compatible instructions.
246 if (isa<CallInst>(I) && !I.isDebugOrPseudoInst() &&
247 (cast<CallInst>(I).isInlineAsm() || isa<IntrinsicInst>(I) ||
249 return true;
250 }
251 }
252 return false;
253}
254
// Splits the function's feature string attribute into individual feature
// names: reads "fmv-features" for multiversioned functions and
// "target-features" otherwise. (The signature header, original line 255,
// was elided by the extraction.)
256 SmallVectorImpl<StringRef> &Features) {
257 StringRef AttributeStr =
258 TTI->isMultiversionedFunction(F) ? "fmv-features" : "target-features";
259 StringRef FeatureStr = F.getFnAttribute(AttributeStr).getValueAsString();
260 FeatureStr.split(Features, ",");
261}
262
// Returns the CPU-supports mask for F's attribute features. (The signature
// and local Features declaration, original lines 263-264, were elided.)
265 extractAttrFeatures(F, this, Features);
266 return AArch64::getCpuSupportsMask(Features);
267}
268
// Returns the FMV (function multiversioning) selection priority computed
// from F's attribute features. (The signature and local Features
// declaration, original lines 269-270, were elided by the extraction.)
271 extractAttrFeatures(F, this, Features);
272 return AArch64::getFMVPriority(Features);
273}
274
// A function is considered multiversioned iff it carries the "fmv-features"
// attribute. (The signature line, original 275, was elided.)
276 return F.hasFnAttribute("fmv-features");
277}
278
// Features whose bits are XOR-inverted before the inline-compatibility
// subset check in areInlineCompatible: they encode restrictions rather than
// capabilities (a "+execute-only" callee may inline into a caller without
// it, but not the reverse).
279const FeatureBitset AArch64TTIImpl::InlineInverseFeatures = {
280 AArch64::FeatureExecuteOnly,
281};
282
// Decides whether Callee may be inlined into Caller, considering SME
// streaming-mode/ZA attributes and target-feature subsets. NOTE(review):
// the signature header (original line 283) and the second operand of the
// first '&&' (original line 291, presumably a callee streaming-interface
// check — verify) were elided by the extraction.
284 const Function *Callee) const {
285 SMECallAttrs CallAttrs(*Caller, *Callee);
286
287 // Never inline a function explicitly marked as being streaming,
288 // into a non-streaming function. Assume it was marked as streaming
289 // for a reason.
290 if (CallAttrs.caller().hasNonStreamingInterfaceAndBody() &&
292 return false;
293
294 // When inlining, we should consider the body of the function, not the
295 // interface.
296 if (CallAttrs.callee().hasStreamingBody()) {
297 CallAttrs.callee().set(SMEAttrs::SM_Compatible, false);
298 CallAttrs.callee().set(SMEAttrs::SM_Enabled, true);
299 }
300
// New-ZA/ZT0 callees set up fresh ZA state; inlining would lose that setup.
301 if (CallAttrs.callee().isNewZA() || CallAttrs.callee().isNewZT0())
302 return false;
303
// If the call boundary needs lazy-save/SM-change/ZA-preservation glue, only
// inline when the callee body has no operations incompatible with the
// caller's mode.
304 if (CallAttrs.requiresLazySave() || CallAttrs.requiresSMChange() ||
305 CallAttrs.requiresPreservingZT0() ||
306 CallAttrs.requiresPreservingAllZAState()) {
307 if (hasPossibleIncompatibleOps(Callee, *getTLI()))
308 return false;
309 }
310
311 const TargetMachine &TM = getTLI()->getTargetMachine();
312 const FeatureBitset &CallerBits =
313 TM.getSubtargetImpl(*Caller)->getFeatureBits();
314 const FeatureBitset &CalleeBits =
315 TM.getSubtargetImpl(*Callee)->getFeatureBits();
316 // Adjust the feature bitsets by inverting some of the bits. This is needed
317 // for target features that represent restrictions rather than capabilities,
318 // for example a "+execute-only" callee can be inlined into a caller without
319 // "+execute-only", but not vice versa.
320 FeatureBitset EffectiveCallerBits = CallerBits ^ InlineInverseFeatures;
321 FeatureBitset EffectiveCalleeBits = CalleeBits ^ InlineInverseFeatures;
322
// Inlining is allowed when the callee's (adjusted) features are a subset of
// the caller's.
323 return (EffectiveCallerBits & EffectiveCalleeBits) == EffectiveCalleeBits;
324}
325
// Rejects argument promotion of pointers to fixed-length vector types wider
// than 128 bits when SVE is used for fixed-length vectors (no VLS ABI for
// such value arguments). (The signature header, original line 326, was
// elided by the extraction.)
327 const Function *Callee,
328 ArrayRef<Type *> Types) const {
329 if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
330 return false;
331
332 // We need to ensure that argument promotion does not attempt to promote
333 // pointers to fixed-length vector types larger than 128 bits like
334 // <8 x float> (and pointers to aggregate types which have such fixed-length
335 // vector type members) into the values of the pointees. Such vector types
336 // are used for SVE VLS but there is no ABI for SVE VLS arguments and the
337 // backend cannot lower such value arguments. The 128-bit fixed-length SVE
338 // types can be safely treated as 128-bit NEON types and they cannot be
339 // distinguished in IR.
340 if (ST->useSVEForFixedLengthVectors() && llvm::any_of(Types, [](Type *Ty) {
341 auto FVTy = dyn_cast<FixedVectorType>(Ty);
342 return FVTy &&
343 FVTy->getScalarSizeInBits() * FVTy->getNumElements() > 128;
344 }))
345 return false;
346
347 return true;
348}
349
// Scales DefaultCallPenalty up when inlining would introduce (or duplicate)
// a PSTATE.SM streaming-mode change around Call; multipliers come from the
// -call-penalty-sm-change / -inline-call-penalty-sm-change options. (The
// signature continuation, original line 351, was elided by the extraction.)
350unsigned
352 unsigned DefaultCallPenalty) const {
353 // This function calculates a penalty for executing Call in F.
354 //
355 // There are two ways this function can be called:
356 // (1) F:
357 // call from F -> G (the call here is Call)
358 //
359 // For (1), Call.getCaller() == F, so it will always return a high cost if
360 // a streaming-mode change is required (thus promoting the need to inline the
361 // function)
362 //
363 // (2) F:
364 // call from F -> G (the call here is not Call)
365 // G:
366 // call from G -> H (the call here is Call)
367 //
368 // For (2), if after inlining the body of G into F the call to H requires a
369 // streaming-mode change, and the call to G from F would also require a
370 // streaming-mode change, then there is benefit to do the streaming-mode
371 // change only once and avoid inlining of G into F.
372
373 SMEAttrs FAttrs(*F);
374 SMECallAttrs CallAttrs(Call, &getTLI()->getRuntimeLibcallsInfo());
375
376 if (SMECallAttrs(FAttrs, CallAttrs.callee()).requiresSMChange()) {
377 if (F == Call.getCaller()) // (1)
378 return CallPenaltyChangeSM * DefaultCallPenalty;
379 if (SMECallAttrs(FAttrs, CallAttrs.caller()).requiresSMChange()) // (2)
380 return InlineCallPenaltyChangeSM * DefaultCallPenalty;
381 }
382
383 return DefaultCallPenalty;
384}
385
// Maximize vector bandwidth for fixed-width vectors when NEON is available,
// and (per the partially elided second return, original line 393) for
// scalable vectors when SVE/streaming-SVE is available and not disabled.
// NOTE(review): the signature (original lines 386-388) and the start of the
// final return statement were elided by the extraction.
389
390 if (K == TargetTransformInfo::RGK_FixedWidthVector && ST->isNeonAvailable())
391 return true;
392
394 ST->isSVEorStreamingSVEAvailable() &&
395 !ST->disableMaximizeScalableBandwidth();
396}
397
398/// Calculate the cost of materializing a 64-bit value. This helper
399/// method might only calculate a fraction of a larger immediate. Therefore it
400/// is valid to return a cost of ZERO.
// NOTE(review): the signature (original line 401) and the SmallVector Insn
// declaration (original line 410) were elided by the extraction.
402 // Check if the immediate can be encoded within an instruction.
403 if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
404 return 0;
405
// Negative values are materialized via MOVN, which needs the bit-inverted
// pattern; normalize before counting moves.
406 if (Val < 0)
407 Val = ~Val;
408
409 // Calculate how many moves we will need to materialize this constant.
411 AArch64_IMM::expandMOVImm(Val, 64, Insn);
412 return Insn.size();
413}
414
415/// Calculate the cost of materializing the given constant.
// NOTE(review): the signature (original lines 416-418) and the Cost
// accumulator declaration (original line 432) were elided by the extraction.
419 assert(Ty->isIntegerTy());
420
421 unsigned BitSize = Ty->getPrimitiveSizeInBits();
422 if (BitSize == 0)
423 return ~0U;
424
425 // Sign-extend all constants to a multiple of 64-bit.
426 APInt ImmVal = Imm;
427 if (BitSize & 0x3f)
428 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
429
430 // Split the constant into 64-bit chunks and calculate the cost for each
431 // chunk.
433 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
434 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
435 int64_t Val = Tmp.getSExtValue();
436 Cost += getIntImmCost(Val);
437 }
438 // We need at least one instruction to materialize the constant.
439 return std::max<InstructionCost>(1, Cost);
440}
441
// Costs an immediate operand of instruction Opcode at operand index Idx,
// returning TCC_Free for immediates that fold into the instruction so that
// constant hoisting leaves them alone. NOTE(review): the extraction elided
// the signature header (original lines 441-442), the Cost initialization
// before the final comparison (original line 502, presumably
// Cost = getIntImmCost(Imm, Ty, CostKind) — verify), and the fall-through
// return at the end (original line 507).
443 const APInt &Imm, Type *Ty,
445 Instruction *Inst) const {
446 assert(Ty->isIntegerTy());
447
448 unsigned BitSize = Ty->getPrimitiveSizeInBits();
449 // There is no cost model for constants with a bit size of 0. Return TCC_Free
450 // here, so that constant hoisting will ignore this constant.
451 if (BitSize == 0)
452 return TTI::TCC_Free;
453
// ImmIdx is the operand index at which this opcode can encode an immediate;
// ~0U means "no immediate operand" for opcodes not listed below.
454 unsigned ImmIdx = ~0U;
455 switch (Opcode) {
456 default:
457 return TTI::TCC_Free;
458 case Instruction::GetElementPtr:
459 // Always hoist the base address of a GetElementPtr.
460 if (Idx == 0)
461 return 2 * TTI::TCC_Basic;
462 return TTI::TCC_Free;
463 case Instruction::Store:
464 ImmIdx = 0;
465 break;
466 case Instruction::Add:
467 case Instruction::Sub:
468 case Instruction::Mul:
469 case Instruction::UDiv:
470 case Instruction::SDiv:
471 case Instruction::URem:
472 case Instruction::SRem:
473 case Instruction::And:
474 case Instruction::Or:
475 case Instruction::Xor:
476 case Instruction::ICmp:
477 ImmIdx = 1;
478 break;
479 // Always return TCC_Free for the shift value of a shift instruction.
480 case Instruction::Shl:
481 case Instruction::LShr:
482 case Instruction::AShr:
483 if (Idx == 1)
484 return TTI::TCC_Free;
485 break;
486 case Instruction::Trunc:
487 case Instruction::ZExt:
488 case Instruction::SExt:
489 case Instruction::IntToPtr:
490 case Instruction::PtrToInt:
491 case Instruction::BitCast:
492 case Instruction::PHI:
493 case Instruction::Call:
494 case Instruction::Select:
495 case Instruction::Ret:
496 case Instruction::Load:
497 break;
498 }
499
500 if (Idx == ImmIdx) {
501 int NumConstants = (BitSize + 63) / 64;
503 return (Cost <= NumConstants * TTI::TCC_Basic)
504 ? static_cast<int>(TTI::TCC_Free)
505 : Cost;
506 }
508}
509
// Costs an immediate operand of intrinsic IID at operand index Idx.
// NOTE(review): the extraction elided the signature header (original lines
// 510-511, 513), the body of the aarch64-intrinsic early return (original
// line 526), the Cost initialization in the with-overflow branch (original
// line 539) and the final fall-through return (original line 559) — all
// presumably delegating to getIntImmCost(Imm, Ty, CostKind); verify.
512 const APInt &Imm, Type *Ty,
514 assert(Ty->isIntegerTy());
515
516 unsigned BitSize = Ty->getPrimitiveSizeInBits();
517 // There is no cost model for constants with a bit size of 0. Return TCC_Free
518 // here, so that constant hoisting will ignore this constant.
519 if (BitSize == 0)
520 return TTI::TCC_Free;
521
522 // Most (all?) AArch64 intrinsics do not support folding immediates into the
523 // selected instruction, so we compute the materialization cost for the
524 // immediate directly.
525 if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
527
528 switch (IID) {
529 default:
530 return TTI::TCC_Free;
531 case Intrinsic::sadd_with_overflow:
532 case Intrinsic::uadd_with_overflow:
533 case Intrinsic::ssub_with_overflow:
534 case Intrinsic::usub_with_overflow:
535 case Intrinsic::smul_with_overflow:
536 case Intrinsic::umul_with_overflow:
537 if (Idx == 1) {
538 int NumConstants = (BitSize + 63) / 64;
540 return (Cost <= NumConstants * TTI::TCC_Basic)
541 ? static_cast<int>(TTI::TCC_Free)
542 : Cost;
543 }
544 break;
545 case Intrinsic::experimental_stackmap:
546 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
547 return TTI::TCC_Free;
548 break;
549 case Intrinsic::experimental_patchpoint_void:
550 case Intrinsic::experimental_patchpoint:
551 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
552 return TTI::TCC_Free;
553 break;
554 case Intrinsic::experimental_gc_statepoint:
555 if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
556 return TTI::TCC_Free;
557 break;
558 }
560}
561
// Reports popcount support: 32/64-bit widths take the elided fast-hardware
// return (original line 566, presumably TTI::PSK_FastHardware — verify);
// everything else is software-expanded. The return-type line (original 562)
// was also elided by the extraction.
563AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) const {
564 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
565 if (TyWidth == 32 || TyWidth == 64)
567 // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
568 return TTI::PSK_Software;
569}
570
// True for scalable vectors narrower than a full SVE register; the second
// operand of the '&&' (original line 573, presumably a comparison of the
// known-min size against AArch64::SVEBitsPerBlock) was elided — verify.
571static bool isUnpackedVectorVT(EVT VecVT) {
572 return VecVT.isScalableVector() &&
574}
575
// Costs an experimental.vector.histogram.add as SVE2 HISTCNT operations,
// scaled by how many native-width histcnts the element count requires.
// NOTE(review): the extraction elided the signature header (original line
// 576) and the bodies of every bail-out branch (original lines 581, 585,
// 594, 601, 607, 615 — presumably InstructionCost::getInvalid(); verify).
577 const IntrinsicCostAttributes &ICA) {
578 // We need to know at least the number of elements in the vector of buckets
579 // and the size of each element to update.
580 if (ICA.getArgTypes().size() < 2)
582
583 // Only interested in costing for the hardware instruction from SVE2.
584 if (!ST->hasSVE2())
586
587 Type *BucketPtrsTy = ICA.getArgTypes()[0]; // Type of vector of pointers
588 Type *EltTy = ICA.getArgTypes()[1]; // Type of bucket elements
589 unsigned TotalHistCnts = 1;
590
591 unsigned EltSize = EltTy->getScalarSizeInBits();
592 // Only allow (up to 64b) integers or pointers
593 if ((!EltTy->isIntegerTy() && !EltTy->isPointerTy()) || EltSize > 64)
595
596 // FIXME: We should be able to generate histcnt for fixed-length vectors
597 // using ptrue with a specific VL.
598 if (VectorType *VTy = dyn_cast<VectorType>(BucketPtrsTy)) {
599 unsigned EC = VTy->getElementCount().getKnownMinValue();
600 if (!isPowerOf2_64(EC) || !VTy->isScalableTy())
602
603 // HistCnt only supports 32b and 64b element types
604 unsigned LegalEltSize = EltSize <= 32 ? 32 : 64;
605
606 if (EC == 2 || (LegalEltSize == 32 && EC == 4))
608
609 unsigned NaturalVectorWidth = AArch64::SVEBitsPerBlock / LegalEltSize;
610 TotalHistCnts = EC / NaturalVectorWidth;
611
612 return InstructionCost(BaseHistCntCost * TotalHistCnts);
613 }
614
616}
617
621 // The code-generator is currently not able to handle scalable vectors
622 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
623 // it. This change will be removed when code-generation for these types is
624 // sufficiently reliable.
625 auto *RetTy = ICA.getReturnType();
626 if (auto *VTy = dyn_cast<ScalableVectorType>(RetTy))
627 if (VTy->getElementCount() == ElementCount::getScalable(1))
629
630 switch (ICA.getID()) {
631 case Intrinsic::experimental_vector_histogram_add: {
632 InstructionCost HistCost = getHistogramCost(ST, ICA);
633 // If the cost isn't valid, we may still be able to scalarize
634 if (HistCost.isValid())
635 return HistCost;
636 break;
637 }
638 case Intrinsic::umin:
639 case Intrinsic::umax:
640 case Intrinsic::smin:
641 case Intrinsic::smax: {
642 static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
643 MVT::v8i16, MVT::v2i32, MVT::v4i32,
644 MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32,
645 MVT::nxv2i64};
646 auto LT = getTypeLegalizationCost(RetTy);
647 // v2i64 types get converted to cmp+bif hence the cost of 2
648 if (LT.second == MVT::v2i64)
649 return LT.first * 2;
650 if (any_of(ValidMinMaxTys, equal_to(LT.second)))
651 return LT.first;
652 break;
653 }
654 case Intrinsic::scmp:
655 case Intrinsic::ucmp: {
656 static const CostTblEntry BitreverseTbl[] = {
657 {Intrinsic::scmp, MVT::i32, 3}, // cmp+cset+csinv
658 {Intrinsic::scmp, MVT::i64, 3}, // cmp+cset+csinv
659 {Intrinsic::scmp, MVT::v8i8, 3}, // cmgt+cmgt+sub
660 {Intrinsic::scmp, MVT::v16i8, 3}, // cmgt+cmgt+sub
661 {Intrinsic::scmp, MVT::v4i16, 3}, // cmgt+cmgt+sub
662 {Intrinsic::scmp, MVT::v8i16, 3}, // cmgt+cmgt+sub
663 {Intrinsic::scmp, MVT::v2i32, 3}, // cmgt+cmgt+sub
664 {Intrinsic::scmp, MVT::v4i32, 3}, // cmgt+cmgt+sub
665 {Intrinsic::scmp, MVT::v1i64, 3}, // cmgt+cmgt+sub
666 {Intrinsic::scmp, MVT::v2i64, 3}, // cmgt+cmgt+sub
667 };
668 const auto LT = getTypeLegalizationCost(RetTy);
669 const auto *Entry =
670 CostTableLookup(BitreverseTbl, Intrinsic::scmp, LT.second);
671 if (Entry)
672 return Entry->Cost * LT.first;
673 break;
674 }
675 case Intrinsic::sadd_sat:
676 case Intrinsic::ssub_sat:
677 case Intrinsic::uadd_sat:
678 case Intrinsic::usub_sat: {
679 static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
680 MVT::v8i16, MVT::v2i32, MVT::v4i32,
681 MVT::v2i64};
682 auto LT = getTypeLegalizationCost(RetTy);
683 // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
684 // need to extend the type, as it uses shr(qadd(shl, shl)).
685 unsigned Instrs =
686 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
687 if (any_of(ValidSatTys, equal_to(LT.second)))
688 return LT.first * Instrs;
689
691 uint64_t VectorSize = TS.getKnownMinValue();
692
693 if (ST->isSVEAvailable() && VectorSize >= 128 && isPowerOf2_64(VectorSize))
694 return LT.first * Instrs;
695
696 break;
697 }
698 case Intrinsic::abs: {
699 static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
700 MVT::v8i16, MVT::v2i32, MVT::v4i32,
701 MVT::v2i64};
702 auto LT = getTypeLegalizationCost(RetTy);
703 if (any_of(ValidAbsTys, equal_to(LT.second)))
704 return LT.first;
705 break;
706 }
707 case Intrinsic::bswap: {
708 static const auto ValidAbsTys = {MVT::v4i16, MVT::v8i16, MVT::v2i32,
709 MVT::v4i32, MVT::v2i64};
710 auto LT = getTypeLegalizationCost(RetTy);
711 if (any_of(ValidAbsTys, equal_to(LT.second)) &&
712 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits())
713 return LT.first;
714 break;
715 }
716 case Intrinsic::fma:
717 case Intrinsic::fmuladd: {
718 // Given a fma or fmuladd, cost it the same as a fmul instruction which are
719 // usually the same for costs. TODO: Add fp16 and bf16 expansion costs.
720 Type *EltTy = RetTy->getScalarType();
721 if (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
722 (EltTy->isHalfTy() && ST->hasFullFP16()))
723 return getArithmeticInstrCost(Instruction::FMul, RetTy, CostKind);
724 break;
725 }
726 case Intrinsic::stepvector: {
727 InstructionCost Cost = 1; // Cost of the `index' instruction
728 auto LT = getTypeLegalizationCost(RetTy);
729 // Legalisation of illegal vectors involves an `index' instruction plus
730 // (LT.first - 1) vector adds.
731 if (LT.first > 1) {
732 Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
733 InstructionCost AddCost =
734 getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
735 Cost += AddCost * (LT.first - 1);
736 }
737 return Cost;
738 }
739 case Intrinsic::vector_extract:
740 case Intrinsic::vector_insert: {
741 // If both the vector and subvector types are legal types and the index
742 // is 0, then this should be a no-op or simple operation; return a
743 // relatively low cost.
744
745 // If arguments aren't actually supplied, then we cannot determine the
746 // value of the index. We also want to skip predicate types.
747 if (ICA.getArgs().size() != ICA.getArgTypes().size() ||
749 break;
750
751 LLVMContext &C = RetTy->getContext();
752 EVT VecVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
753 bool IsExtract = ICA.getID() == Intrinsic::vector_extract;
754 EVT SubVecVT = IsExtract ? getTLI()->getValueType(DL, RetTy)
755 : getTLI()->getValueType(DL, ICA.getArgTypes()[1]);
756 // Skip this if either the vector or subvector types are unpacked
757 // SVE types; they may get lowered to stack stores and loads.
758 if (isUnpackedVectorVT(VecVT) || isUnpackedVectorVT(SubVecVT))
759 break;
760
762 getTLI()->getTypeConversion(C, SubVecVT);
764 getTLI()->getTypeConversion(C, VecVT);
765 const Value *Idx = IsExtract ? ICA.getArgs()[1] : ICA.getArgs()[2];
766 const ConstantInt *CIdx = cast<ConstantInt>(Idx);
767 if (SubVecLK.first == TargetLoweringBase::TypeLegal &&
768 VecLK.first == TargetLoweringBase::TypeLegal && CIdx->isZero())
769 return TTI::TCC_Free;
770 break;
771 }
772 case Intrinsic::bitreverse: {
773 static const CostTblEntry BitreverseTbl[] = {
774 {Intrinsic::bitreverse, MVT::i32, 1},
775 {Intrinsic::bitreverse, MVT::i64, 1},
776 {Intrinsic::bitreverse, MVT::v8i8, 1},
777 {Intrinsic::bitreverse, MVT::v16i8, 1},
778 {Intrinsic::bitreverse, MVT::v4i16, 2},
779 {Intrinsic::bitreverse, MVT::v8i16, 2},
780 {Intrinsic::bitreverse, MVT::v2i32, 2},
781 {Intrinsic::bitreverse, MVT::v4i32, 2},
782 {Intrinsic::bitreverse, MVT::v1i64, 2},
783 {Intrinsic::bitreverse, MVT::v2i64, 2},
784 };
785 const auto LegalisationCost = getTypeLegalizationCost(RetTy);
786 const auto *Entry =
787 CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
788 if (Entry) {
789 // Cost Model is using the legal type(i32) that i8 and i16 will be
790 // converted to +1 so that we match the actual lowering cost
791 if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
792 TLI->getValueType(DL, RetTy, true) == MVT::i16)
793 return LegalisationCost.first * Entry->Cost + 1;
794
795 return LegalisationCost.first * Entry->Cost;
796 }
797 break;
798 }
799 case Intrinsic::ctpop: {
800 if (!ST->hasNEON()) {
801 // 32-bit or 64-bit ctpop without NEON is 12 instructions.
802 return getTypeLegalizationCost(RetTy).first * 12;
803 }
804 static const CostTblEntry CtpopCostTbl[] = {
805 {ISD::CTPOP, MVT::v2i64, 4},
806 {ISD::CTPOP, MVT::v4i32, 3},
807 {ISD::CTPOP, MVT::v8i16, 2},
808 {ISD::CTPOP, MVT::v16i8, 1},
809 {ISD::CTPOP, MVT::i64, 4},
810 {ISD::CTPOP, MVT::v2i32, 3},
811 {ISD::CTPOP, MVT::v4i16, 2},
812 {ISD::CTPOP, MVT::v8i8, 1},
813 {ISD::CTPOP, MVT::i32, 5},
814 };
815 auto LT = getTypeLegalizationCost(RetTy);
816 MVT MTy = LT.second;
817 if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
818 // Extra cost of +1 when illegal vector types are legalized by promoting
819 // the integer type.
820 int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
821 RetTy->getScalarSizeInBits()
822 ? 1
823 : 0;
824 return LT.first * Entry->Cost + ExtraCost;
825 }
826 break;
827 }
828 case Intrinsic::sadd_with_overflow:
829 case Intrinsic::uadd_with_overflow:
830 case Intrinsic::ssub_with_overflow:
831 case Intrinsic::usub_with_overflow:
832 case Intrinsic::smul_with_overflow:
833 case Intrinsic::umul_with_overflow: {
834 static const CostTblEntry WithOverflowCostTbl[] = {
835 {Intrinsic::sadd_with_overflow, MVT::i8, 3},
836 {Intrinsic::uadd_with_overflow, MVT::i8, 3},
837 {Intrinsic::sadd_with_overflow, MVT::i16, 3},
838 {Intrinsic::uadd_with_overflow, MVT::i16, 3},
839 {Intrinsic::sadd_with_overflow, MVT::i32, 1},
840 {Intrinsic::uadd_with_overflow, MVT::i32, 1},
841 {Intrinsic::sadd_with_overflow, MVT::i64, 1},
842 {Intrinsic::uadd_with_overflow, MVT::i64, 1},
843 {Intrinsic::ssub_with_overflow, MVT::i8, 3},
844 {Intrinsic::usub_with_overflow, MVT::i8, 3},
845 {Intrinsic::ssub_with_overflow, MVT::i16, 3},
846 {Intrinsic::usub_with_overflow, MVT::i16, 3},
847 {Intrinsic::ssub_with_overflow, MVT::i32, 1},
848 {Intrinsic::usub_with_overflow, MVT::i32, 1},
849 {Intrinsic::ssub_with_overflow, MVT::i64, 1},
850 {Intrinsic::usub_with_overflow, MVT::i64, 1},
851 {Intrinsic::smul_with_overflow, MVT::i8, 5},
852 {Intrinsic::umul_with_overflow, MVT::i8, 4},
853 {Intrinsic::smul_with_overflow, MVT::i16, 5},
854 {Intrinsic::umul_with_overflow, MVT::i16, 4},
855 {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
856 {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
857 {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
858 {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
859 };
860 EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
861 if (MTy.isSimple())
862 if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
863 MTy.getSimpleVT()))
864 return Entry->Cost;
865 break;
866 }
867 case Intrinsic::fptosi_sat:
868 case Intrinsic::fptoui_sat: {
869 if (ICA.getArgTypes().empty())
870 break;
871 bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
872 auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
873 EVT MTy = TLI->getValueType(DL, RetTy);
874 // Check for the legal types, which are where the size of the input and the
875 // output are the same, or we are using cvt f64->i32 or f32->i64.
876 if ((LT.second == MVT::f32 || LT.second == MVT::f64 ||
877 LT.second == MVT::v2f32 || LT.second == MVT::v4f32 ||
878 LT.second == MVT::v2f64)) {
879 if ((LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits() ||
880 (LT.second == MVT::f64 && MTy == MVT::i32) ||
881 (LT.second == MVT::f32 && MTy == MVT::i64)))
882 return LT.first;
883 // Extending vector types v2f32->v2i64, fcvtl*2 + fcvt*2
884 if (LT.second.getScalarType() == MVT::f32 && MTy.isFixedLengthVector() &&
885 MTy.getScalarSizeInBits() == 64)
886 return LT.first * (MTy.getVectorNumElements() > 2 ? 4 : 2);
887 }
888 // Similarly for fp16 sizes. Without FullFP16 we generally need to fcvt to
889 // f32.
890 if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
891 return LT.first + getIntrinsicInstrCost(
892 {ICA.getID(),
893 RetTy,
894 {ICA.getArgTypes()[0]->getWithNewType(
895 Type::getFloatTy(RetTy->getContext()))}},
896 CostKind);
897 if ((LT.second == MVT::f16 && MTy == MVT::i32) ||
898 (LT.second == MVT::f16 && MTy == MVT::i64) ||
899 ((LT.second == MVT::v4f16 || LT.second == MVT::v8f16) &&
900 (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits())))
901 return LT.first;
902 // Extending vector types v8f16->v8i32, fcvtl*2 + fcvt*2
903 if (LT.second.getScalarType() == MVT::f16 && MTy.isFixedLengthVector() &&
904 MTy.getScalarSizeInBits() == 32)
905 return LT.first * (MTy.getVectorNumElements() > 4 ? 4 : 2);
906 // Extending vector types v8f16->v8i32. These current scalarize but the
907 // codegen could be better.
908 if (LT.second.getScalarType() == MVT::f16 && MTy.isFixedLengthVector() &&
909 MTy.getScalarSizeInBits() == 64)
910 return MTy.getVectorNumElements() * 3;
911
912 // If we can we use a legal convert followed by a min+max
913 if ((LT.second.getScalarType() == MVT::f32 ||
914 LT.second.getScalarType() == MVT::f64 ||
915 LT.second.getScalarType() == MVT::f16) &&
916 LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
917 Type *LegalTy =
918 Type::getIntNTy(RetTy->getContext(), LT.second.getScalarSizeInBits());
919 if (LT.second.isVector())
920 LegalTy = VectorType::get(LegalTy, LT.second.getVectorElementCount());
922 IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin : Intrinsic::umin,
923 LegalTy, {LegalTy, LegalTy});
925 IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax : Intrinsic::umax,
926 LegalTy, {LegalTy, LegalTy});
928 return LT.first * Cost +
929 ((LT.second.getScalarType() != MVT::f16 || ST->hasFullFP16()) ? 0
930 : 1);
931 }
932 // Otherwise we need to follow the default expansion that clamps the value
933 // using a float min/max with a fcmp+sel for nan handling when signed.
934 Type *FPTy = ICA.getArgTypes()[0]->getScalarType();
935 RetTy = RetTy->getScalarType();
936 if (LT.second.isVector()) {
937 FPTy = VectorType::get(FPTy, LT.second.getVectorElementCount());
938 RetTy = VectorType::get(RetTy, LT.second.getVectorElementCount());
939 }
940 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FPTy, {FPTy, FPTy});
942 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FPTy, {FPTy, FPTy});
944 Cost +=
945 getCastInstrCost(IsSigned ? Instruction::FPToSI : Instruction::FPToUI,
947 if (IsSigned) {
948 Type *CondTy = RetTy->getWithNewBitWidth(1);
949 Cost += getCmpSelInstrCost(BinaryOperator::FCmp, FPTy, CondTy,
951 Cost += getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
953 }
954 return LT.first * Cost;
955 }
956 case Intrinsic::fshl:
957 case Intrinsic::fshr: {
958 if (ICA.getArgs().empty())
959 break;
960
961 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(ICA.getArgs()[2]);
962
963 // ROTR / ROTL is a funnel shift with equal first and second operand. For
964 // ROTR on integer registers (i32/i64) this can be done in a single ror
965 // instruction. A fshl with a non-constant shift uses a neg + ror.
966 if (RetTy->isIntegerTy() && ICA.getArgs()[0] == ICA.getArgs()[1] &&
967 (RetTy->getPrimitiveSizeInBits() == 32 ||
968 RetTy->getPrimitiveSizeInBits() == 64)) {
969 InstructionCost NegCost =
970 (ICA.getID() == Intrinsic::fshl && !OpInfoZ.isConstant()) ? 1 : 0;
971 return 1 + NegCost;
972 }
973
974 // TODO: Add handling for fshl where third argument is not a constant.
975 if (!OpInfoZ.isConstant())
976 break;
977
978 const auto LegalisationCost = getTypeLegalizationCost(RetTy);
979 if (OpInfoZ.isUniform()) {
980 static const CostTblEntry FshlTbl[] = {
981 {Intrinsic::fshl, MVT::v4i32, 2}, // shl + usra
982 {Intrinsic::fshl, MVT::v2i64, 2}, {Intrinsic::fshl, MVT::v16i8, 2},
983 {Intrinsic::fshl, MVT::v8i16, 2}, {Intrinsic::fshl, MVT::v2i32, 2},
984 {Intrinsic::fshl, MVT::v8i8, 2}, {Intrinsic::fshl, MVT::v4i16, 2}};
985 // Costs for both fshl & fshr are the same, so just pass Intrinsic::fshl
986 // to avoid having to duplicate the costs.
987 const auto *Entry =
988 CostTableLookup(FshlTbl, Intrinsic::fshl, LegalisationCost.second);
989 if (Entry)
990 return LegalisationCost.first * Entry->Cost;
991 }
992
993 auto TyL = getTypeLegalizationCost(RetTy);
994 if (!RetTy->isIntegerTy())
995 break;
996
997 // Estimate cost manually, as types like i8 and i16 will get promoted to
998 // i32 and CostTableLookup will ignore the extra conversion cost.
999 bool HigherCost = (RetTy->getScalarSizeInBits() != 32 &&
1000 RetTy->getScalarSizeInBits() < 64) ||
1001 (RetTy->getScalarSizeInBits() % 64 != 0);
1002 unsigned ExtraCost = HigherCost ? 1 : 0;
1003 if (RetTy->getScalarSizeInBits() == 32 ||
1004 RetTy->getScalarSizeInBits() == 64)
1005 ExtraCost = 0; // fhsl/fshr for i32 and i64 can be lowered to a single
1006 // extr instruction.
1007 else if (HigherCost)
1008 ExtraCost = 1;
1009 else
1010 break;
1011 return TyL.first + ExtraCost;
1012 }
1013 case Intrinsic::get_active_lane_mask: {
1014 auto RetTy = cast<VectorType>(ICA.getReturnType());
1015 EVT RetVT = getTLI()->getValueType(DL, RetTy);
1016 EVT OpVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
1017 if (getTLI()->shouldExpandGetActiveLaneMask(RetVT, OpVT))
1018 break;
1019
1020 if (RetTy->isScalableTy()) {
1021 if (TLI->getTypeAction(RetTy->getContext(), RetVT) !=
1023 break;
1024
1025 auto LT = getTypeLegalizationCost(RetTy);
1026 InstructionCost Cost = LT.first;
1027 // When SVE2p1 or SME2 is available, we can halve getTypeLegalizationCost
1028 // as get_active_lane_mask may lower to the sve_whilelo_x2 intrinsic, e.g.
1029 // nxv32i1 = get_active_lane_mask(base, idx) ->
1030 // {nxv16i1, nxv16i1} = sve_whilelo_x2(base, idx)
1031 if (ST->hasSVE2p1() || ST->hasSME2()) {
1032 Cost /= 2;
1033 if (Cost == 1)
1034 return Cost;
1035 }
1036
1037 // If more than one whilelo intrinsic is required, include the extra cost
1038 // required by the saturating add & select required to increment the
1039 // start value after the first intrinsic call.
1040 Type *OpTy = ICA.getArgTypes()[0];
1041 IntrinsicCostAttributes AddAttrs(Intrinsic::uadd_sat, OpTy, {OpTy, OpTy});
1042 InstructionCost SplitCost = getIntrinsicInstrCost(AddAttrs, CostKind);
1043 Type *CondTy = OpTy->getWithNewBitWidth(1);
1044 SplitCost += getCmpSelInstrCost(Instruction::Select, OpTy, CondTy,
1046 return Cost + (SplitCost * (Cost - 1));
1047 } else if (!getTLI()->isTypeLegal(RetVT)) {
1048 // We don't have enough context at this point to determine if the mask
1049 // is going to be kept live after the block, which will force the vXi1
1050 // type to be expanded to legal vectors of integers, e.g. v4i1->v4i32.
1051 // For now, we just assume the vectorizer created this intrinsic and
1052 // the result will be the input for a PHI. In this case the cost will
1053 // be extremely high for fixed-width vectors.
1054 // NOTE: getScalarizationOverhead returns a cost that's far too
1055 // pessimistic for the actual generated codegen. In reality there are
1056 // two instructions generated per lane.
1057 return cast<FixedVectorType>(RetTy)->getNumElements() * 2;
1058 }
1059 break;
1060 }
1061 case Intrinsic::experimental_vector_match: {
1062 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
1063 EVT SearchVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
1064 unsigned SearchSize = NeedleTy->getNumElements();
1065 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize)) {
1066 // Base cost for MATCH instructions. At least on the Neoverse V2 and
1067 // Neoverse V3, these are cheap operations with the same latency as a
1068 // vector ADD. In most cases, however, we also need to do an extra DUP.
1069 // For fixed-length vectors we currently need an extra five to six
1070 // instructions besides the MATCH.
1072 if (isa<FixedVectorType>(RetTy))
1073 Cost += 10;
1074 return Cost;
1075 }
1076 break;
1077 }
1078 case Intrinsic::experimental_cttz_elts: {
1079 EVT ArgVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
1080 if (!getTLI()->shouldExpandCttzElements(ArgVT)) {
1081 // This will consist of a SVE brkb and a cntp instruction. These
1082 // typically have the same latency and half the throughput as a vector
1083 // add instruction.
1084 return 4;
1085 }
1086 break;
1087 }
1088 case Intrinsic::loop_dependence_raw_mask:
1089 case Intrinsic::loop_dependence_war_mask: {
1090 // The whilewr/rw instructions require SVE2 or SME.
1091 if (ST->hasSVE2() || ST->hasSME()) {
1092 EVT VecVT = getTLI()->getValueType(DL, RetTy);
1093 unsigned EltSizeInBytes =
1094 cast<ConstantInt>(ICA.getArgs()[2])->getZExtValue();
1095 if (!is_contained({1u, 2u, 4u, 8u}, EltSizeInBytes) ||
1096 VecVT.getVectorMinNumElements() != (16 / EltSizeInBytes))
1097 break;
1098 // For fixed-vector types we need to AND the mask with a ptrue vl<N>.
1099 return isa<FixedVectorType>(RetTy) ? 2 : 1;
1100 }
1101 break;
1102 }
1103 case Intrinsic::experimental_vector_extract_last_active:
1104 if (ST->isSVEorStreamingSVEAvailable()) {
1105 auto [LegalCost, _] = getTypeLegalizationCost(ICA.getArgTypes()[0]);
1106 // This should turn into chained clastb instructions.
1107 return LegalCost;
1108 }
1109 break;
1110 default:
1111 break;
1112 }
1114}
1115
1116/// Remove redundant reinterpret casts (aarch64.sve.convert.to.svbool) that
1117/// feed this intrinsic through a phi node, in the presence of control flow.
1118static std::optional<Instruction *> processPhiNode(InstCombiner &IC,
1119 IntrinsicInst &II) {
// NOTE(review): original line 1120 is absent from this extraction; it
// presumably declared the `Worklist` container used below -- confirm upstream.
1121 auto RequiredType = II.getType();
1122
 // The combine only applies when the intrinsic's single argument is a phi.
1123 auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
1124 assert(PN && "Expected Phi Node!");
1125
1126 // Don't create a new Phi unless we can remove the old one.
1127 if (!PN->hasOneUse())
1128 return std::nullopt;
1129
 // Bail out unless every incoming value is a convert.to.svbool intrinsic
 // whose source operand already has the type this intrinsic produces.
1130 for (Value *IncValPhi : PN->incoming_values()) {
1131 auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
1132 if (!Reinterpret ||
1133 Reinterpret->getIntrinsicID() !=
1134 Intrinsic::aarch64_sve_convert_to_svbool ||
1135 RequiredType != Reinterpret->getArgOperand(0)->getType())
1136 return std::nullopt;
1137 }
1138
1139 // Create the new Phi
1140 IC.Builder.SetInsertPoint(PN);
1141 PHINode *NPN = IC.Builder.CreatePHI(RequiredType, PN->getNumIncomingValues())
 // Queue the old phi (and below, the reinterprets) for later cleanup.
1142 Worklist.push_back(PN);
1143
 // Wire each incoming edge of the new phi directly to the reinterpret's
 // source value, bypassing the convert.to_svbool round-trip.
1144 for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
1145 auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
1146 NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
1147 Worklist.push_back(Reinterpret);
1148 }
1149
1150 // Cleanup Phi Node and reinterprets
1151 return IC.replaceInstUsesWith(II, NPN);
1152}
1153
1154// A collection of properties common to SVE intrinsics that allow for combines
1155// to be written without needing to know the specific intrinsic.
1157 //
1158 // Helper routines for common intrinsic definitions.
1159 //
1160
1161 // e.g. llvm.aarch64.sve.add pg, op1, op2
1162 // with IID ==> llvm.aarch64.sve.add_u
1163 static SVEIntrinsicInfo
1170
1171 // e.g. llvm.aarch64.sve.neg inactive, pg, op
1178
1179 // e.g. llvm.aarch64.sve.fcvtnt inactive, pg, op
1185
1186 // e.g. llvm.aarch64.sve.add_u pg, op1, op2
1192
1193 // e.g. llvm.aarch64.sve.prf pg, ptr (GPIndex = 0)
1194 // llvm.aarch64.sve.st1 data, pg, ptr (GPIndex = 1)
1195 static SVEIntrinsicInfo defaultVoidOp(unsigned GPIndex) {
1196 return SVEIntrinsicInfo()
1199 }
1200
1201 // e.g. llvm.aarch64.sve.cmpeq pg, op1, op2
1202 // llvm.aarch64.sve.ld1 pg, ptr
1209
1210 // All properties relate to predication and thus having a general predicate
1211 // is the minimum requirement to say there is intrinsic info to act on.
1212 explicit operator bool() const { return hasGoverningPredicate(); }
1213
1214 //
1215 // Properties relating to the governing predicate.
1216 //
1217
1219 return GoverningPredicateIdx != std::numeric_limits<unsigned>::max();
1220 }
1221
1223 assert(hasGoverningPredicate() && "Propery not set!");
1224 return GoverningPredicateIdx;
1225 }
1226
1228 assert(!hasGoverningPredicate() && "Cannot set property twice!");
1229 GoverningPredicateIdx = Index;
1230 return *this;
1231 }
1232
1233 //
1234 // Properties relating to operations the intrinsic could be transformed into.
1235 // NOTE: This does not mean such a transformation is always possible, but the
1236 // knowledge makes it possible to reuse existing optimisations without needing
1237 // to embed specific handling for each intrinsic. For example, instruction
1238 // simplification can be used to optimise an intrinsic's active lanes.
1239 //
1240
1242 return UndefIntrinsic != Intrinsic::not_intrinsic;
1243 }
1244
1246 assert(hasMatchingUndefIntrinsic() && "Propery not set!");
1247 return UndefIntrinsic;
1248 }
1249
1251 assert(!hasMatchingUndefIntrinsic() && "Cannot set property twice!");
1252 UndefIntrinsic = IID;
1253 return *this;
1254 }
1255
1256 bool hasMatchingIROpode() const { return IROpcode != 0; }
1257
1258 unsigned getMatchingIROpode() const {
1259 assert(hasMatchingIROpode() && "Propery not set!");
1260 return IROpcode;
1261 }
1262
1264 assert(!hasMatchingIROpode() && "Cannot set property twice!");
1265 IROpcode = Opcode;
1266 return *this;
1267 }
1268
1269 //
1270 // Properties relating to the result of inactive lanes.
1271 //
1272
1274 return ResultLanes == InactiveLanesTakenFromOperand;
1275 }
1276
1278 assert(inactiveLanesTakenFromOperand() && "Propery not set!");
1279 return OperandIdxForInactiveLanes;
1280 }
1281
1283 assert(ResultLanes == Uninitialized && "Cannot set property twice!");
1284 ResultLanes = InactiveLanesTakenFromOperand;
1285 OperandIdxForInactiveLanes = Index;
1286 return *this;
1287 }
1288
1290 return ResultLanes == InactiveLanesAreNotDefined;
1291 }
1292
1294 assert(ResultLanes == Uninitialized && "Cannot set property twice!");
1295 ResultLanes = InactiveLanesAreNotDefined;
1296 return *this;
1297 }
1298
1300 return ResultLanes == InactiveLanesAreUnused;
1301 }
1302
1304 assert(ResultLanes == Uninitialized && "Cannot set property twice!");
1305 ResultLanes = InactiveLanesAreUnused;
1306 return *this;
1307 }
1308
1309 // NOTE: Whilst not limited to only inactive lanes, the common use case is:
1310 // inactiveLanesAreZeroed =
1311 // resultIsZeroInitialized() && inactiveLanesAreUnused()
1312 bool resultIsZeroInitialized() const { return ResultIsZeroInitialized; }
1313
1315 ResultIsZeroInitialized = true;
1316 return *this;
1317 }
1318
1319 //
1320 // The first operand of unary merging operations is typically only used to
1321 // set the result for inactive lanes. Knowing this allows us to deadcode the
1322 // operand when we can prove there are no inactive lanes.
1323 //
1324
1326 return OperandIdxWithNoActiveLanes != std::numeric_limits<unsigned>::max();
1327 }
1328
1330 assert(hasOperandWithNoActiveLanes() && "Propery not set!");
1331 return OperandIdxWithNoActiveLanes;
1332 }
1333
1335 assert(!hasOperandWithNoActiveLanes() && "Cannot set property twice!");
1336 OperandIdxWithNoActiveLanes = Index;
1337 return *this;
1338 }
1339
1340private:
1341 unsigned GoverningPredicateIdx = std::numeric_limits<unsigned>::max();
1342
1343 Intrinsic::ID UndefIntrinsic = Intrinsic::not_intrinsic;
1344 unsigned IROpcode = 0;
1345
1346 enum PredicationStyle {
1348 InactiveLanesTakenFromOperand,
1349 InactiveLanesAreNotDefined,
1350 InactiveLanesAreUnused
1351 } ResultLanes = Uninitialized;
1352
1353 bool ResultIsZeroInitialized = false;
1354 unsigned OperandIdxForInactiveLanes = std::numeric_limits<unsigned>::max();
1355 unsigned OperandIdxWithNoActiveLanes = std::numeric_limits<unsigned>::max();
1356};
1357
1359 // Some SVE intrinsics do not use scalable vector types, but since they are
1360 // not relevant from an SVEIntrinsicInfo perspective, they are also ignored.
1361 if (!isa<ScalableVectorType>(II.getType()) &&
1362 all_of(II.args(), [&](const Value *V) {
1363 return !isa<ScalableVectorType>(V->getType());
1364 }))
1365 return SVEIntrinsicInfo();
1366
1367 Intrinsic::ID IID = II.getIntrinsicID();
1368 switch (IID) {
1369 default:
1370 break;
1371 case Intrinsic::aarch64_sve_fcvt_bf16f32_v2:
1372 case Intrinsic::aarch64_sve_fcvt_f16f32:
1373 case Intrinsic::aarch64_sve_fcvt_f16f64:
1374 case Intrinsic::aarch64_sve_fcvt_f32f16:
1375 case Intrinsic::aarch64_sve_fcvt_f32f64:
1376 case Intrinsic::aarch64_sve_fcvt_f64f16:
1377 case Intrinsic::aarch64_sve_fcvt_f64f32:
1378 case Intrinsic::aarch64_sve_fcvtlt_f32f16:
1379 case Intrinsic::aarch64_sve_fcvtlt_f64f32:
1380 case Intrinsic::aarch64_sve_fcvtx_f32f64:
1381 case Intrinsic::aarch64_sve_fcvtzs:
1382 case Intrinsic::aarch64_sve_fcvtzs_i32f16:
1383 case Intrinsic::aarch64_sve_fcvtzs_i32f64:
1384 case Intrinsic::aarch64_sve_fcvtzs_i64f16:
1385 case Intrinsic::aarch64_sve_fcvtzs_i64f32:
1386 case Intrinsic::aarch64_sve_fcvtzu:
1387 case Intrinsic::aarch64_sve_fcvtzu_i32f16:
1388 case Intrinsic::aarch64_sve_fcvtzu_i32f64:
1389 case Intrinsic::aarch64_sve_fcvtzu_i64f16:
1390 case Intrinsic::aarch64_sve_fcvtzu_i64f32:
1391 case Intrinsic::aarch64_sve_scvtf:
1392 case Intrinsic::aarch64_sve_scvtf_f16i32:
1393 case Intrinsic::aarch64_sve_scvtf_f16i64:
1394 case Intrinsic::aarch64_sve_scvtf_f32i64:
1395 case Intrinsic::aarch64_sve_scvtf_f64i32:
1396 case Intrinsic::aarch64_sve_ucvtf:
1397 case Intrinsic::aarch64_sve_ucvtf_f16i32:
1398 case Intrinsic::aarch64_sve_ucvtf_f16i64:
1399 case Intrinsic::aarch64_sve_ucvtf_f32i64:
1400 case Intrinsic::aarch64_sve_ucvtf_f64i32:
1402
1403 case Intrinsic::aarch64_sve_fcvtnt_bf16f32_v2:
1404 case Intrinsic::aarch64_sve_fcvtnt_f16f32:
1405 case Intrinsic::aarch64_sve_fcvtnt_f32f64:
1406 case Intrinsic::aarch64_sve_fcvtxnt_f32f64:
1408
1409 case Intrinsic::aarch64_sve_fabd:
1410 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fabd_u);
1411 case Intrinsic::aarch64_sve_fadd:
1412 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fadd_u)
1413 .setMatchingIROpcode(Instruction::FAdd);
1414 case Intrinsic::aarch64_sve_fdiv:
1415 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fdiv_u)
1416 .setMatchingIROpcode(Instruction::FDiv);
1417 case Intrinsic::aarch64_sve_fmax:
1418 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmax_u);
1419 case Intrinsic::aarch64_sve_fmaxnm:
1420 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmaxnm_u);
1421 case Intrinsic::aarch64_sve_fmin:
1422 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmin_u);
1423 case Intrinsic::aarch64_sve_fminnm:
1424 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fminnm_u);
1425 case Intrinsic::aarch64_sve_fmla:
1426 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmla_u);
1427 case Intrinsic::aarch64_sve_fmls:
1428 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmls_u);
1429 case Intrinsic::aarch64_sve_fmul:
1430 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmul_u)
1431 .setMatchingIROpcode(Instruction::FMul);
1432 case Intrinsic::aarch64_sve_fmulx:
1433 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fmulx_u);
1434 case Intrinsic::aarch64_sve_fnmla:
1435 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fnmla_u);
1436 case Intrinsic::aarch64_sve_fnmls:
1437 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fnmls_u);
1438 case Intrinsic::aarch64_sve_fsub:
1439 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_fsub_u)
1440 .setMatchingIROpcode(Instruction::FSub);
1441 case Intrinsic::aarch64_sve_add:
1442 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_add_u)
1443 .setMatchingIROpcode(Instruction::Add);
1444 case Intrinsic::aarch64_sve_mla:
1445 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_mla_u);
1446 case Intrinsic::aarch64_sve_mls:
1447 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_mls_u);
1448 case Intrinsic::aarch64_sve_mul:
1449 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_mul_u)
1450 .setMatchingIROpcode(Instruction::Mul);
1451 case Intrinsic::aarch64_sve_sabd:
1452 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_sabd_u);
1453 case Intrinsic::aarch64_sve_sdiv:
1454 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_sdiv_u)
1455 .setMatchingIROpcode(Instruction::SDiv);
1456 case Intrinsic::aarch64_sve_smax:
1457 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_smax_u);
1458 case Intrinsic::aarch64_sve_smin:
1459 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_smin_u);
1460 case Intrinsic::aarch64_sve_smulh:
1461 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_smulh_u);
1462 case Intrinsic::aarch64_sve_sub:
1463 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_sub_u)
1464 .setMatchingIROpcode(Instruction::Sub);
1465 case Intrinsic::aarch64_sve_uabd:
1466 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_uabd_u);
1467 case Intrinsic::aarch64_sve_udiv:
1468 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_udiv_u)
1469 .setMatchingIROpcode(Instruction::UDiv);
1470 case Intrinsic::aarch64_sve_umax:
1471 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_umax_u);
1472 case Intrinsic::aarch64_sve_umin:
1473 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_umin_u);
1474 case Intrinsic::aarch64_sve_umulh:
1475 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_umulh_u);
1476 case Intrinsic::aarch64_sve_asr:
1477 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_asr_u)
1478 .setMatchingIROpcode(Instruction::AShr);
1479 case Intrinsic::aarch64_sve_lsl:
1480 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_lsl_u)
1481 .setMatchingIROpcode(Instruction::Shl);
1482 case Intrinsic::aarch64_sve_lsr:
1483 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_lsr_u)
1484 .setMatchingIROpcode(Instruction::LShr);
1485 case Intrinsic::aarch64_sve_and:
1486 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_and_u)
1487 .setMatchingIROpcode(Instruction::And);
1488 case Intrinsic::aarch64_sve_bic:
1489 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_bic_u);
1490 case Intrinsic::aarch64_sve_eor:
1491 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_eor_u)
1492 .setMatchingIROpcode(Instruction::Xor);
1493 case Intrinsic::aarch64_sve_orr:
1494 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_orr_u)
1495 .setMatchingIROpcode(Instruction::Or);
1496 case Intrinsic::aarch64_sve_shsub:
1497 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_shsub_u);
1498 case Intrinsic::aarch64_sve_shsubr:
1500 case Intrinsic::aarch64_sve_sqrshl:
1501 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_sqrshl_u);
1502 case Intrinsic::aarch64_sve_sqshl:
1503 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_sqshl_u);
1504 case Intrinsic::aarch64_sve_sqsub:
1505 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_sqsub_u);
1506 case Intrinsic::aarch64_sve_srshl:
1507 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_srshl_u);
1508 case Intrinsic::aarch64_sve_uhsub:
1509 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_uhsub_u);
1510 case Intrinsic::aarch64_sve_uhsubr:
1512 case Intrinsic::aarch64_sve_uqrshl:
1513 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_uqrshl_u);
1514 case Intrinsic::aarch64_sve_uqshl:
1515 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_uqshl_u);
1516 case Intrinsic::aarch64_sve_uqsub:
1517 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_uqsub_u);
1518 case Intrinsic::aarch64_sve_urshl:
1519 return SVEIntrinsicInfo::defaultMergingOp(Intrinsic::aarch64_sve_urshl_u);
1520
1521 case Intrinsic::aarch64_sve_add_u:
1523 Instruction::Add);
1524 case Intrinsic::aarch64_sve_and_u:
1526 Instruction::And);
1527 case Intrinsic::aarch64_sve_asr_u:
1529 Instruction::AShr);
1530 case Intrinsic::aarch64_sve_eor_u:
1532 Instruction::Xor);
1533 case Intrinsic::aarch64_sve_fadd_u:
1535 Instruction::FAdd);
1536 case Intrinsic::aarch64_sve_fdiv_u:
1538 Instruction::FDiv);
1539 case Intrinsic::aarch64_sve_fmul_u:
1541 Instruction::FMul);
1542 case Intrinsic::aarch64_sve_fsub_u:
1544 Instruction::FSub);
1545 case Intrinsic::aarch64_sve_lsl_u:
1547 Instruction::Shl);
1548 case Intrinsic::aarch64_sve_lsr_u:
1550 Instruction::LShr);
1551 case Intrinsic::aarch64_sve_mul_u:
1553 Instruction::Mul);
1554 case Intrinsic::aarch64_sve_orr_u:
1556 Instruction::Or);
1557 case Intrinsic::aarch64_sve_sdiv_u:
1559 Instruction::SDiv);
1560 case Intrinsic::aarch64_sve_sub_u:
1562 Instruction::Sub);
1563 case Intrinsic::aarch64_sve_udiv_u:
1565 Instruction::UDiv);
1566
1567 case Intrinsic::aarch64_sve_addqv:
1568 case Intrinsic::aarch64_sve_and_z:
1569 case Intrinsic::aarch64_sve_bic_z:
1570 case Intrinsic::aarch64_sve_brka_z:
1571 case Intrinsic::aarch64_sve_brkb_z:
1572 case Intrinsic::aarch64_sve_brkn_z:
1573 case Intrinsic::aarch64_sve_brkpa_z:
1574 case Intrinsic::aarch64_sve_brkpb_z:
1575 case Intrinsic::aarch64_sve_cntp:
1576 case Intrinsic::aarch64_sve_compact:
1577 case Intrinsic::aarch64_sve_eor_z:
1578 case Intrinsic::aarch64_sve_eorv:
1579 case Intrinsic::aarch64_sve_eorqv:
1580 case Intrinsic::aarch64_sve_nand_z:
1581 case Intrinsic::aarch64_sve_nor_z:
1582 case Intrinsic::aarch64_sve_orn_z:
1583 case Intrinsic::aarch64_sve_orr_z:
1584 case Intrinsic::aarch64_sve_orv:
1585 case Intrinsic::aarch64_sve_orqv:
1586 case Intrinsic::aarch64_sve_pnext:
1587 case Intrinsic::aarch64_sve_rdffr_z:
1588 case Intrinsic::aarch64_sve_saddv:
1589 case Intrinsic::aarch64_sve_uaddv:
1590 case Intrinsic::aarch64_sve_umaxv:
1591 case Intrinsic::aarch64_sve_umaxqv:
1592 case Intrinsic::aarch64_sve_cmpeq:
1593 case Intrinsic::aarch64_sve_cmpeq_wide:
1594 case Intrinsic::aarch64_sve_cmpge:
1595 case Intrinsic::aarch64_sve_cmpge_wide:
1596 case Intrinsic::aarch64_sve_cmpgt:
1597 case Intrinsic::aarch64_sve_cmpgt_wide:
1598 case Intrinsic::aarch64_sve_cmphi:
1599 case Intrinsic::aarch64_sve_cmphi_wide:
1600 case Intrinsic::aarch64_sve_cmphs:
1601 case Intrinsic::aarch64_sve_cmphs_wide:
1602 case Intrinsic::aarch64_sve_cmple_wide:
1603 case Intrinsic::aarch64_sve_cmplo_wide:
1604 case Intrinsic::aarch64_sve_cmpls_wide:
1605 case Intrinsic::aarch64_sve_cmplt_wide:
1606 case Intrinsic::aarch64_sve_cmpne:
1607 case Intrinsic::aarch64_sve_cmpne_wide:
1608 case Intrinsic::aarch64_sve_facge:
1609 case Intrinsic::aarch64_sve_facgt:
1610 case Intrinsic::aarch64_sve_fcmpeq:
1611 case Intrinsic::aarch64_sve_fcmpge:
1612 case Intrinsic::aarch64_sve_fcmpgt:
1613 case Intrinsic::aarch64_sve_fcmpne:
1614 case Intrinsic::aarch64_sve_fcmpuo:
1615 case Intrinsic::aarch64_sve_ld1:
1616 case Intrinsic::aarch64_sve_ld1_gather:
1617 case Intrinsic::aarch64_sve_ld1_gather_index:
1618 case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
1619 case Intrinsic::aarch64_sve_ld1_gather_sxtw:
1620 case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
1621 case Intrinsic::aarch64_sve_ld1_gather_uxtw:
1622 case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
1623 case Intrinsic::aarch64_sve_ld1q_gather_index:
1624 case Intrinsic::aarch64_sve_ld1q_gather_scalar_offset:
1625 case Intrinsic::aarch64_sve_ld1q_gather_vector_offset:
1626 case Intrinsic::aarch64_sve_ld1ro:
1627 case Intrinsic::aarch64_sve_ld1rq:
1628 case Intrinsic::aarch64_sve_ld1udq:
1629 case Intrinsic::aarch64_sve_ld1uwq:
1630 case Intrinsic::aarch64_sve_ld2_sret:
1631 case Intrinsic::aarch64_sve_ld2q_sret:
1632 case Intrinsic::aarch64_sve_ld3_sret:
1633 case Intrinsic::aarch64_sve_ld3q_sret:
1634 case Intrinsic::aarch64_sve_ld4_sret:
1635 case Intrinsic::aarch64_sve_ld4q_sret:
1636 case Intrinsic::aarch64_sve_ldff1:
1637 case Intrinsic::aarch64_sve_ldff1_gather:
1638 case Intrinsic::aarch64_sve_ldff1_gather_index:
1639 case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
1640 case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
1641 case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
1642 case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
1643 case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
1644 case Intrinsic::aarch64_sve_ldnf1:
1645 case Intrinsic::aarch64_sve_ldnt1:
1646 case Intrinsic::aarch64_sve_ldnt1_gather:
1647 case Intrinsic::aarch64_sve_ldnt1_gather_index:
1648 case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
1649 case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
1651
1652 case Intrinsic::aarch64_sve_prf:
1653 case Intrinsic::aarch64_sve_prfb_gather_index:
1654 case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
1655 case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
1656 case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
1657 case Intrinsic::aarch64_sve_prfd_gather_index:
1658 case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
1659 case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
1660 case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
1661 case Intrinsic::aarch64_sve_prfh_gather_index:
1662 case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
1663 case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
1664 case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
1665 case Intrinsic::aarch64_sve_prfw_gather_index:
1666 case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
1667 case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
1668 case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
1670
1671 case Intrinsic::aarch64_sve_st1_scatter:
1672 case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
1673 case Intrinsic::aarch64_sve_st1_scatter_sxtw:
1674 case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
1675 case Intrinsic::aarch64_sve_st1_scatter_uxtw:
1676 case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
1677 case Intrinsic::aarch64_sve_st1dq:
1678 case Intrinsic::aarch64_sve_st1q_scatter_index:
1679 case Intrinsic::aarch64_sve_st1q_scatter_scalar_offset:
1680 case Intrinsic::aarch64_sve_st1q_scatter_vector_offset:
1681 case Intrinsic::aarch64_sve_st1wq:
1682 case Intrinsic::aarch64_sve_stnt1:
1683 case Intrinsic::aarch64_sve_stnt1_scatter:
1684 case Intrinsic::aarch64_sve_stnt1_scatter_index:
1685 case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
1686 case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
1688 case Intrinsic::aarch64_sve_st2:
1689 case Intrinsic::aarch64_sve_st2q:
1691 case Intrinsic::aarch64_sve_st3:
1692 case Intrinsic::aarch64_sve_st3q:
1694 case Intrinsic::aarch64_sve_st4:
1695 case Intrinsic::aarch64_sve_st4q:
1697 }
1698
1699 return SVEIntrinsicInfo();
1700}
1701
// Returns true when \p Pred is provably an all-active (all-ones) SVE
// predicate, looking through lane-reducing predicate casts.
1702static bool isAllActivePredicate(Value *Pred) {
1703 Value *UncastedPred;
1704
1705 // Look through predicate casts that only remove lanes.
// NOTE(review): original line 1706 is absent from this extraction; it
// presumably opened a match() on an SVE predicate-conversion intrinsic whose
// uncasted operand is bound below -- confirm upstream.
1707 m_Value(UncastedPred)))) {
1708 auto *OrigPredTy = cast<ScalableVectorType>(Pred->getType());
1709 Pred = UncastedPred;
1710
// NOTE(review): original line 1711 is also missing (the inner match() on a
// second predicate conversion) -- confirm upstream.
1712 m_Value(UncastedPred))))
1713 // If the predicate has the same or less lanes than the uncasted predicate
1714 // then we know the casting has no effect.
1715 if (OrigPredTy->getMinNumElements() <=
1716 cast<ScalableVectorType>(UncastedPred->getType())
1717 ->getMinNumElements())
1718 Pred = UncastedPred;
1719 }
1720
 // An all-active predicate is an all-ones constant vector.
1721 auto *C = dyn_cast<Constant>(Pred);
1722 return C && C->isAllOnesValue();
1723}
1724
1725// Simplify `V` by only considering the operations that affect active lanes.
1726// This function should only return existing Values or newly created Constants.
1727static Value *stripInactiveLanes(Value *V, const Value *Pg) {
 // A dup governed by the same predicate with a constant scalar operand is,
 // for the active lanes, equivalent to a constant splat of that scalar.
1728 auto *Dup = dyn_cast<IntrinsicInst>(V);
1729 if (Dup && Dup->getIntrinsicID() == Intrinsic::aarch64_sve_dup &&
1730 Dup->getOperand(1) == Pg && isa<Constant>(Dup->getOperand(2)))
// NOTE(review): original line 1731 is absent from this extraction; it
// presumably began the returned constant-splat expression that the two
// following argument lines belong to -- confirm upstream.
1732 cast<VectorType>(V->getType())->getElementCount(),
1733 cast<Constant>(Dup->getOperand(2)));
1734
 // Otherwise leave the value untouched.
1735 return V;
1736}
1737
// Simplify a predicated SVE intrinsic whose active lanes are equivalent to an
// IR binary operator (per IInfo), using InstSimplify on that operator.
1738static std::optional<Instruction *>
// NOTE(review): original line 1739 is absent from this extraction; it carried
// the function name and leading parameters (the caller below refers to this
// routine as simplifySVEIntrinsicBinOp(IC, II, IInfo)) -- confirm upstream.
1740 const SVEIntrinsicInfo &IInfo) {
1741 const unsigned Opc = IInfo.getMatchingIROpode();
1742 assert(Instruction::isBinaryOp(Opc) && "Expected a binary operation!");
1743
1744 Value *Pg = II.getOperand(0);
1745 Value *Op1 = II.getOperand(1);
1746 Value *Op2 = II.getOperand(2);
1747 const DataLayout &DL = II.getDataLayout();
1748
1749 // Canonicalise constants to the RHS.
// NOTE(review): original line 1750 is absent from this extraction; it
// presumably opened this if-statement with a commutativity test on Opc --
// confirm upstream.
1751 isa<Constant>(Op1) && !isa<Constant>(Op2)) {
1752 IC.replaceOperand(II, 1, Op2);
1753 IC.replaceOperand(II, 2, Op1);
1754 return &II;
1755 }
1756
1757 // Only active lanes matter when simplifying the operation.
1758 Op1 = stripInactiveLanes(Op1, Pg);
1759 Op2 = stripInactiveLanes(Op2, Pg);
1760
 // Run InstSimplify on the equivalent IR binop, honouring fast-math flags
 // when the intrinsic carries them.
1761 Value *SimpleII;
1762 if (auto FII = dyn_cast<FPMathOperator>(&II))
1763 SimpleII = simplifyBinOp(Opc, Op1, Op2, FII->getFastMathFlags(), DL);
1764 else
1765 SimpleII = simplifyBinOp(Opc, Op1, Op2, DL);
1766
1767 // An SVE intrinsic's result is always defined. However, this is not the case
1768 // for its equivalent IR instruction (e.g. when shifting by an amount more
1769 // than the data's bitwidth). Simplifications to an undefined result must be
1770 // ignored to preserve the intrinsic's expected behaviour.
1771 if (!SimpleII || isa<UndefValue>(SimpleII))
1772 return std::nullopt;
1773
 // When inactive lanes are undefined the simplified value can replace the
 // intrinsic directly.
1774 if (IInfo.inactiveLanesAreNotDefined())
1775 return IC.replaceInstUsesWith(II, SimpleII);
1776
1777 Value *Inactive = II.getOperand(IInfo.getOperandIdxInactiveLanesTakenFrom());
1778
1779 // The intrinsic does nothing (e.g. sve.mul(pg, A, 1.0)).
1780 if (SimpleII == Inactive)
1781 return IC.replaceInstUsesWith(II, SimpleII);
1782
1783 // Inactive lanes must be preserved.
1784 SimpleII = IC.Builder.CreateSelect(Pg, SimpleII, Inactive);
1785 return IC.replaceInstUsesWith(II, SimpleII);
1786}
1787
// NOTE(review): rendered copy — leading decimals are doxygen line numbers.
// The extraction dropped lines 1791 (declarator), 1800 (the predicate guard
// before the first replaceInstUsesWith), 1806 (the zero-initialized-result
// replacement) and 1830 (second half of the isBinaryOp condition). Restore
// from the upstream file before compiling.
1788// Use SVE intrinsic info to eliminate redundant operands and/or canonicalise
1789// to operations with less strict inactive lane requirements.
1790static std::optional<Instruction *>
1792                     const SVEIntrinsicInfo &IInfo) {
1793  if (!IInfo.hasGoverningPredicate())
1794    return std::nullopt;
1795
1796  auto *OpPredicate = II.getOperand(IInfo.getGoverningPredicateOperandIdx());
1797
1798  // If there are no active lanes.
1799  if (match(OpPredicate, m_ZeroInt())) {
  // With an all-false predicate the result comes entirely from the
  // "inactive" source operand (guard line missing here — see NOTE above).
1801    return IC.replaceInstUsesWith(
1802        II, II.getOperand(IInfo.getOperandIdxInactiveLanesTakenFrom()));
1803
1804    if (IInfo.inactiveLanesAreUnused()) {
1805      if (IInfo.resultIsZeroInitialized())
  // (missing line 1807: replace with a zero value — see NOTE above)
1807
  // Nothing observes the result: the intrinsic is dead.
1808      return IC.eraseInstFromFunction(II);
1809    }
1810  }
1811
1812  // If there are no inactive lanes.
1813  if (isAllActivePredicate(OpPredicate)) {
  // An operand whose lanes are never active can be replaced by undef,
  // increasing the chance of further folds.
1814    if (IInfo.hasOperandWithNoActiveLanes()) {
1815      unsigned OpIdx = IInfo.getOperandIdxWithNoActiveLanes();
1816      if (!isa<UndefValue>(II.getOperand(OpIdx)))
1817        return IC.replaceOperand(II, OpIdx, UndefValue::get(II.getType()));
1818    }
1819
  // Prefer the _u ("undef inactive lanes") intrinsic, which is less
  // constrained for later combines.
1820    if (IInfo.hasMatchingUndefIntrinsic()) {
1821      auto *NewDecl = Intrinsic::getOrInsertDeclaration(
1822          II.getModule(), IInfo.getMatchingUndefIntrinsic(), {II.getType()});
1823      II.setCalledFunction(NewDecl);
1824      return &II;
1825    }
1826  }
1827
1828  // Operation specific simplifications.
1829  if (IInfo.hasMatchingIROpode() &&
1831    return simplifySVEIntrinsicBinOp(IC, II, IInfo);
1832
1833  return std::nullopt;
1834}
1835
1836// (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _))))
1837// => (binop (pred) (from_svbool _) (from_svbool _))
1838//
1839// The above transformation eliminates a `to_svbool` in the predicate
1840// operand of bitwise operation `binop` by narrowing the vector width of
1841// the operation. For example, it would convert a `<vscale x 16 x i1>
1842// and` into a `<vscale x 4 x i1> and`. This is profitable because
1843// to_svbool must zero the new lanes during widening, whereas
1844// from_svbool is free.
1845static std::optional<Instruction *>
1847 auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0));
1848 if (!BinOp)
1849 return std::nullopt;
1850
1851 auto IntrinsicID = BinOp->getIntrinsicID();
1852 switch (IntrinsicID) {
1853 case Intrinsic::aarch64_sve_and_z:
1854 case Intrinsic::aarch64_sve_bic_z:
1855 case Intrinsic::aarch64_sve_eor_z:
1856 case Intrinsic::aarch64_sve_nand_z:
1857 case Intrinsic::aarch64_sve_nor_z:
1858 case Intrinsic::aarch64_sve_orn_z:
1859 case Intrinsic::aarch64_sve_orr_z:
1860 break;
1861 default:
1862 return std::nullopt;
1863 }
1864
1865 auto BinOpPred = BinOp->getOperand(0);
1866 auto BinOpOp1 = BinOp->getOperand(1);
1867 auto BinOpOp2 = BinOp->getOperand(2);
1868
1869 auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred);
1870 if (!PredIntr ||
1871 PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool)
1872 return std::nullopt;
1873
1874 auto PredOp = PredIntr->getOperand(0);
1875 auto PredOpTy = cast<VectorType>(PredOp->getType());
1876 if (PredOpTy != II.getType())
1877 return std::nullopt;
1878
1879 SmallVector<Value *> NarrowedBinOpArgs = {PredOp};
1880 auto NarrowBinOpOp1 = IC.Builder.CreateIntrinsic(
1881 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1});
1882 NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
1883 if (BinOpOp1 == BinOpOp2)
1884 NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
1885 else
1886 NarrowedBinOpArgs.push_back(IC.Builder.CreateIntrinsic(
1887 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2}));
1888
1889 auto NarrowedBinOp =
1890 IC.Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs);
1891 return IC.replaceInstUsesWith(II, NarrowedBinOp);
1892}
1893
1894static std::optional<Instruction *>
1896 // If the reinterpret instruction operand is a PHI Node
1897 if (isa<PHINode>(II.getArgOperand(0)))
1898 return processPhiNode(IC, II);
1899
1900 if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II))
1901 return BinOpCombine;
1902
1903 // Ignore converts to/from svcount_t.
1904 if (isa<TargetExtType>(II.getArgOperand(0)->getType()) ||
1905 isa<TargetExtType>(II.getType()))
1906 return std::nullopt;
1907
1908 SmallVector<Instruction *, 32> CandidatesForRemoval;
1909 Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
1910
1911 const auto *IVTy = cast<VectorType>(II.getType());
1912
1913 // Walk the chain of conversions.
1914 while (Cursor) {
1915 // If the type of the cursor has fewer lanes than the final result, zeroing
1916 // must take place, which breaks the equivalence chain.
1917 const auto *CursorVTy = cast<VectorType>(Cursor->getType());
1918 if (CursorVTy->getElementCount().getKnownMinValue() <
1919 IVTy->getElementCount().getKnownMinValue())
1920 break;
1921
1922 // If the cursor has the same type as I, it is a viable replacement.
1923 if (Cursor->getType() == IVTy)
1924 EarliestReplacement = Cursor;
1925
1926 auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
1927
1928 // If this is not an SVE conversion intrinsic, this is the end of the chain.
1929 if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
1930 Intrinsic::aarch64_sve_convert_to_svbool ||
1931 IntrinsicCursor->getIntrinsicID() ==
1932 Intrinsic::aarch64_sve_convert_from_svbool))
1933 break;
1934
1935 CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
1936 Cursor = IntrinsicCursor->getOperand(0);
1937 }
1938
1939 // If no viable replacement in the conversion chain was found, there is
1940 // nothing to do.
1941 if (!EarliestReplacement)
1942 return std::nullopt;
1943
1944 return IC.replaceInstUsesWith(II, EarliestReplacement);
1945}
1946
1947static std::optional<Instruction *> instCombineSVESel(InstCombiner &IC,
1948 IntrinsicInst &II) {
1949 // svsel(ptrue, x, y) => x
1950 auto *OpPredicate = II.getOperand(0);
1951 if (isAllActivePredicate(OpPredicate))
1952 return IC.replaceInstUsesWith(II, II.getOperand(1));
1953
1954 auto Select =
1955 IC.Builder.CreateSelect(OpPredicate, II.getOperand(1), II.getOperand(2));
1956 return IC.replaceInstUsesWith(II, Select);
1957}
1958
1959static std::optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
1960 IntrinsicInst &II) {
1961 Value *Pg = II.getOperand(1);
1962
1963 // sve.dup(V, all_active, X) ==> splat(X)
1964 if (isAllActivePredicate(Pg)) {
1965 auto *RetTy = cast<ScalableVectorType>(II.getType());
1966 Value *Splat = IC.Builder.CreateVectorSplat(RetTy->getElementCount(),
1967 II.getArgOperand(2));
1968 return IC.replaceInstUsesWith(II, Splat);
1969 }
1970
1972 m_SpecificInt(AArch64SVEPredPattern::vl1))))
1973 return std::nullopt;
1974
1975 // sve.dup(V, sve.ptrue(vl1), X) ==> insertelement V, X, 0
1976 Value *Insert = IC.Builder.CreateInsertElement(
1977 II.getArgOperand(0), II.getArgOperand(2), uint64_t(0));
1978 return IC.replaceInstUsesWith(II, Insert);
1979}
1980
1981static std::optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
1982 IntrinsicInst &II) {
1983 // Replace DupX with a regular IR splat.
1984 auto *RetTy = cast<ScalableVectorType>(II.getType());
1985 Value *Splat = IC.Builder.CreateVectorSplat(RetTy->getElementCount(),
1986 II.getArgOperand(0));
1987 Splat->takeName(&II);
1988 return IC.replaceInstUsesWith(II, Splat);
1989}
1990
1991static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
1992 IntrinsicInst &II) {
1993 LLVMContext &Ctx = II.getContext();
1994
1995 if (!isAllActivePredicate(II.getArgOperand(0)))
1996 return std::nullopt;
1997
1998 // Check that we have a compare of zero..
1999 auto *SplatValue =
2001 if (!SplatValue || !SplatValue->isZero())
2002 return std::nullopt;
2003
2004 // ..against a dupq
2005 auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
2006 if (!DupQLane ||
2007 DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
2008 return std::nullopt;
2009
2010 // Where the dupq is a lane 0 replicate of a vector insert
2011 auto *DupQLaneIdx = dyn_cast<ConstantInt>(DupQLane->getArgOperand(1));
2012 if (!DupQLaneIdx || !DupQLaneIdx->isZero())
2013 return std::nullopt;
2014
2015 auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
2016 if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert)
2017 return std::nullopt;
2018
2019 // Where the vector insert is a fixed constant vector insert into undef at
2020 // index zero
2021 if (!isa<UndefValue>(VecIns->getArgOperand(0)))
2022 return std::nullopt;
2023
2024 if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
2025 return std::nullopt;
2026
2027 auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
2028 if (!ConstVec)
2029 return std::nullopt;
2030
2031 auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
2032 auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
2033 if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
2034 return std::nullopt;
2035
2036 unsigned NumElts = VecTy->getNumElements();
2037 unsigned PredicateBits = 0;
2038
2039 // Expand intrinsic operands to a 16-bit byte level predicate
2040 for (unsigned I = 0; I < NumElts; ++I) {
2041 auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
2042 if (!Arg)
2043 return std::nullopt;
2044 if (!Arg->isZero())
2045 PredicateBits |= 1 << (I * (16 / NumElts));
2046 }
2047
2048 // If all bits are zero bail early with an empty predicate
2049 if (PredicateBits == 0) {
2050 auto *PFalse = Constant::getNullValue(II.getType());
2051 PFalse->takeName(&II);
2052 return IC.replaceInstUsesWith(II, PFalse);
2053 }
2054
2055 // Calculate largest predicate type used (where byte predicate is largest)
2056 unsigned Mask = 8;
2057 for (unsigned I = 0; I < 16; ++I)
2058 if ((PredicateBits & (1 << I)) != 0)
2059 Mask |= (I % 8);
2060
2061 unsigned PredSize = Mask & -Mask;
2062 auto *PredType = ScalableVectorType::get(
2063 Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
2064
2065 // Ensure all relevant bits are set
2066 for (unsigned I = 0; I < 16; I += PredSize)
2067 if ((PredicateBits & (1 << I)) == 0)
2068 return std::nullopt;
2069
2070 auto *PTruePat =
2071 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
2072 auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
2073 {PredType}, {PTruePat});
2074 auto *ConvertToSVBool = IC.Builder.CreateIntrinsic(
2075 Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
2076 auto *ConvertFromSVBool =
2077 IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
2078 {II.getType()}, {ConvertToSVBool});
2079
2080 ConvertFromSVBool->takeName(&II);
2081 return IC.replaceInstUsesWith(II, ConvertFromSVBool);
2082}
2083
2084static std::optional<Instruction *> instCombineSVELast(InstCombiner &IC,
2085 IntrinsicInst &II) {
2086 Value *Pg = II.getArgOperand(0);
2087 Value *Vec = II.getArgOperand(1);
2088 auto IntrinsicID = II.getIntrinsicID();
2089 bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;
2090
2091 // lastX(splat(X)) --> X
2092 if (auto *SplatVal = getSplatValue(Vec))
2093 return IC.replaceInstUsesWith(II, SplatVal);
2094
2095 // If x and/or y is a splat value then:
2096 // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
2097 Value *LHS, *RHS;
2098 if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
2099 if (isSplatValue(LHS) || isSplatValue(RHS)) {
2100 auto *OldBinOp = cast<BinaryOperator>(Vec);
2101 auto OpC = OldBinOp->getOpcode();
2102 auto *NewLHS =
2103 IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
2104 auto *NewRHS =
2105 IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
2107 OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), II.getIterator());
2108 return IC.replaceInstUsesWith(II, NewBinOp);
2109 }
2110 }
2111
2112 auto *C = dyn_cast<Constant>(Pg);
2113 if (IsAfter && C && C->isNullValue()) {
2114 // The intrinsic is extracting lane 0 so use an extract instead.
2115 auto *IdxTy = Type::getInt64Ty(II.getContext());
2116 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
2117 Extract->insertBefore(II.getIterator());
2118 Extract->takeName(&II);
2119 return IC.replaceInstUsesWith(II, Extract);
2120 }
2121
2122 auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
2123 if (!IntrPG)
2124 return std::nullopt;
2125
2126 if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
2127 return std::nullopt;
2128
2129 const auto PTruePattern =
2130 cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
2131
2132 // Can the intrinsic's predicate be converted to a known constant index?
2133 unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
2134 if (!MinNumElts)
2135 return std::nullopt;
2136
2137 unsigned Idx = MinNumElts - 1;
2138 // Increment the index if extracting the element after the last active
2139 // predicate element.
2140 if (IsAfter)
2141 ++Idx;
2142
2143 // Ignore extracts whose index is larger than the known minimum vector
2144 // length. NOTE: This is an artificial constraint where we prefer to
2145 // maintain what the user asked for until an alternative is proven faster.
2146 auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
2147 if (Idx >= PgVTy->getMinNumElements())
2148 return std::nullopt;
2149
2150 // The intrinsic is extracting a fixed lane so use an extract instead.
2151 auto *IdxTy = Type::getInt64Ty(II.getContext());
2152 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
2153 Extract->insertBefore(II.getIterator());
2154 Extract->takeName(&II);
2155 return IC.replaceInstUsesWith(II, Extract);
2156}
2157
2158static std::optional<Instruction *> instCombineSVECondLast(InstCombiner &IC,
2159 IntrinsicInst &II) {
2160 // The SIMD&FP variant of CLAST[AB] is significantly faster than the scalar
2161 // integer variant across a variety of micro-architectures. Replace scalar
2162 // integer CLAST[AB] intrinsic with optimal SIMD&FP variant. A simple
2163 // bitcast-to-fp + clast[ab] + bitcast-to-int will cost a cycle or two more
2164 // depending on the micro-architecture, but has been observed as generally
2165 // being faster, particularly when the CLAST[AB] op is a loop-carried
2166 // dependency.
2167 Value *Pg = II.getArgOperand(0);
2168 Value *Fallback = II.getArgOperand(1);
2169 Value *Vec = II.getArgOperand(2);
2170 Type *Ty = II.getType();
2171
2172 if (!Ty->isIntegerTy())
2173 return std::nullopt;
2174
2175 Type *FPTy;
2176 switch (cast<IntegerType>(Ty)->getBitWidth()) {
2177 default:
2178 return std::nullopt;
2179 case 16:
2180 FPTy = IC.Builder.getHalfTy();
2181 break;
2182 case 32:
2183 FPTy = IC.Builder.getFloatTy();
2184 break;
2185 case 64:
2186 FPTy = IC.Builder.getDoubleTy();
2187 break;
2188 }
2189
2190 Value *FPFallBack = IC.Builder.CreateBitCast(Fallback, FPTy);
2191 auto *FPVTy = VectorType::get(
2192 FPTy, cast<VectorType>(Vec->getType())->getElementCount());
2193 Value *FPVec = IC.Builder.CreateBitCast(Vec, FPVTy);
2194 auto *FPII = IC.Builder.CreateIntrinsic(
2195 II.getIntrinsicID(), {FPVec->getType()}, {Pg, FPFallBack, FPVec});
2196 Value *FPIItoInt = IC.Builder.CreateBitCast(FPII, II.getType());
2197 return IC.replaceInstUsesWith(II, FPIItoInt);
2198}
2199
2200static std::optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
2201 IntrinsicInst &II) {
2202 LLVMContext &Ctx = II.getContext();
2203 // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
2204 // can work with RDFFR_PP for ptest elimination.
2205 auto *AllPat =
2206 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
2207 auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
2208 {II.getType()}, {AllPat});
2209 auto *RDFFR =
2210 IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {PTrue});
2211 RDFFR->takeName(&II);
2212 return IC.replaceInstUsesWith(II, RDFFR);
2213}
2214
2215static std::optional<Instruction *>
2217 const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
2218
2219 if (Pattern == AArch64SVEPredPattern::all) {
2221 II.getType(), ElementCount::getScalable(NumElts));
2222 Cnt->takeName(&II);
2223 return IC.replaceInstUsesWith(II, Cnt);
2224 }
2225
2226 unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);
2227
2228 return MinNumElts && NumElts >= MinNumElts
2229 ? std::optional<Instruction *>(IC.replaceInstUsesWith(
2230 II, ConstantInt::get(II.getType(), MinNumElts)))
2231 : std::nullopt;
2232}
2233
// Streaming-mode fold for an SME cnts* counting intrinsic (equivalent to the
// SVE counterpart with the `all` predicate pattern, per the comment below).
// NOTE(review): rendered copy — leading decimals are doxygen line numbers.
// The extraction dropped line 2235 (the declarator; the continuation indent
// suggests a name like `instCombineSMECntsElts(...)`) and line 2243 (the
// initializer of `Cnt`). Restore both from upstream before compiling.
2234static std::optional<Instruction *>
2236                       const AArch64Subtarget *ST) {
2237  if (!ST->isStreaming())
2238    return std::nullopt;
2239
2240  // In streaming-mode, aarch64_sme_cntds is equivalent to aarch64_sve_cntd
2241  // with SVEPredPattern::all
2242  Value *Cnt =
2244  Cnt->takeName(&II);
2245  return IC.replaceInstUsesWith(II, Cnt);
2246}
2247
2248static std::optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
2249 IntrinsicInst &II) {
2250 Value *PgVal = II.getArgOperand(0);
2251 Value *OpVal = II.getArgOperand(1);
2252
2253 // PTEST_<FIRST|LAST>(X, X) is equivalent to PTEST_ANY(X, X).
2254 // Later optimizations prefer this form.
2255 if (PgVal == OpVal &&
2256 (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_first ||
2257 II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_last)) {
2258 Value *Ops[] = {PgVal, OpVal};
2259 Type *Tys[] = {PgVal->getType()};
2260
2261 auto *PTest =
2262 IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptest_any, Tys, Ops);
2263 PTest->takeName(&II);
2264
2265 return IC.replaceInstUsesWith(II, PTest);
2266 }
2267
2270
2271 if (!Pg || !Op)
2272 return std::nullopt;
2273
2274 Intrinsic::ID OpIID = Op->getIntrinsicID();
2275
2276 if (Pg->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
2277 OpIID == Intrinsic::aarch64_sve_convert_to_svbool &&
2278 Pg->getArgOperand(0)->getType() == Op->getArgOperand(0)->getType()) {
2279 Value *Ops[] = {Pg->getArgOperand(0), Op->getArgOperand(0)};
2280 Type *Tys[] = {Pg->getArgOperand(0)->getType()};
2281
2282 auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
2283
2284 PTest->takeName(&II);
2285 return IC.replaceInstUsesWith(II, PTest);
2286 }
2287
2288 // Transform PTEST_ANY(X=OP(PG,...), X) -> PTEST_ANY(PG, X)).
2289 // Later optimizations may rewrite sequence to use the flag-setting variant
2290 // of instruction X to remove PTEST.
2291 if ((Pg == Op) && (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_any) &&
2292 ((OpIID == Intrinsic::aarch64_sve_brka_z) ||
2293 (OpIID == Intrinsic::aarch64_sve_brkb_z) ||
2294 (OpIID == Intrinsic::aarch64_sve_brkpa_z) ||
2295 (OpIID == Intrinsic::aarch64_sve_brkpb_z) ||
2296 (OpIID == Intrinsic::aarch64_sve_rdffr_z) ||
2297 (OpIID == Intrinsic::aarch64_sve_and_z) ||
2298 (OpIID == Intrinsic::aarch64_sve_bic_z) ||
2299 (OpIID == Intrinsic::aarch64_sve_eor_z) ||
2300 (OpIID == Intrinsic::aarch64_sve_nand_z) ||
2301 (OpIID == Intrinsic::aarch64_sve_nor_z) ||
2302 (OpIID == Intrinsic::aarch64_sve_orn_z) ||
2303 (OpIID == Intrinsic::aarch64_sve_orr_z))) {
2304 Value *Ops[] = {Pg->getArgOperand(0), Pg};
2305 Type *Tys[] = {Pg->getType()};
2306
2307 auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
2308 PTest->takeName(&II);
2309
2310 return IC.replaceInstUsesWith(II, PTest);
2311 }
2312
2313 return std::nullopt;
2314}
2315
2316template <Intrinsic::ID MulOpc, Intrinsic::ID FuseOpc>
2317static std::optional<Instruction *>
2319 bool MergeIntoAddendOp) {
2320 Value *P = II.getOperand(0);
2321 Value *MulOp0, *MulOp1, *AddendOp, *Mul;
2322 if (MergeIntoAddendOp) {
2323 AddendOp = II.getOperand(1);
2324 Mul = II.getOperand(2);
2325 } else {
2326 AddendOp = II.getOperand(2);
2327 Mul = II.getOperand(1);
2328 }
2329
2331 m_Value(MulOp1))))
2332 return std::nullopt;
2333
2334 if (!Mul->hasOneUse())
2335 return std::nullopt;
2336
2337 Instruction *FMFSource = nullptr;
2338 if (II.getType()->isFPOrFPVectorTy()) {
2339 llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
2340 // Stop the combine when the flags on the inputs differ in case dropping
2341 // flags would lead to us missing out on more beneficial optimizations.
2342 if (FAddFlags != cast<CallInst>(Mul)->getFastMathFlags())
2343 return std::nullopt;
2344 if (!FAddFlags.allowContract())
2345 return std::nullopt;
2346 FMFSource = &II;
2347 }
2348
2349 CallInst *Res;
2350 if (MergeIntoAddendOp)
2351 Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
2352 {P, AddendOp, MulOp0, MulOp1}, FMFSource);
2353 else
2354 Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
2355 {P, MulOp0, MulOp1, AddendOp}, FMFSource);
2356
2357 return IC.replaceInstUsesWith(II, Res);
2358}
2359
2360static std::optional<Instruction *>
2362 Value *Pred = II.getOperand(0);
2363 Value *PtrOp = II.getOperand(1);
2364 Type *VecTy = II.getType();
2365
2366 if (isAllActivePredicate(Pred)) {
2367 LoadInst *Load = IC.Builder.CreateLoad(VecTy, PtrOp);
2368 Load->copyMetadata(II);
2369 return IC.replaceInstUsesWith(II, Load);
2370 }
2371
2372 CallInst *MaskedLoad =
2373 IC.Builder.CreateMaskedLoad(VecTy, PtrOp, PtrOp->getPointerAlignment(DL),
2374 Pred, ConstantAggregateZero::get(VecTy));
2375 MaskedLoad->copyMetadata(II);
2376 return IC.replaceInstUsesWith(II, MaskedLoad);
2377}
2378
2379static std::optional<Instruction *>
2381 Value *VecOp = II.getOperand(0);
2382 Value *Pred = II.getOperand(1);
2383 Value *PtrOp = II.getOperand(2);
2384
2385 if (isAllActivePredicate(Pred)) {
2386 StoreInst *Store = IC.Builder.CreateStore(VecOp, PtrOp);
2387 Store->copyMetadata(II);
2388 return IC.eraseInstFromFunction(II);
2389 }
2390
2391 CallInst *MaskedStore = IC.Builder.CreateMaskedStore(
2392 VecOp, PtrOp, PtrOp->getPointerAlignment(DL), Pred);
2393 MaskedStore->copyMetadata(II);
2394 return IC.eraseInstFromFunction(II);
2395}
2396
2398 switch (Intrinsic) {
2399 case Intrinsic::aarch64_sve_fmul_u:
2400 return Instruction::BinaryOps::FMul;
2401 case Intrinsic::aarch64_sve_fadd_u:
2402 return Instruction::BinaryOps::FAdd;
2403 case Intrinsic::aarch64_sve_fsub_u:
2404 return Instruction::BinaryOps::FSub;
2405 default:
2406 return Instruction::BinaryOpsEnd;
2407 }
2408}
2409
2410static std::optional<Instruction *>
2412 // Bail due to missing support for ISD::STRICT_ scalable vector operations.
2413 if (II.isStrictFP())
2414 return std::nullopt;
2415
2416 auto *OpPredicate = II.getOperand(0);
2417 auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
2418 if (BinOpCode == Instruction::BinaryOpsEnd ||
2419 !isAllActivePredicate(OpPredicate))
2420 return std::nullopt;
2421 auto BinOp = IC.Builder.CreateBinOpFMF(
2422 BinOpCode, II.getOperand(1), II.getOperand(2), II.getFastMathFlags());
2423 return IC.replaceInstUsesWith(II, BinOp);
2424}
2425
2426static std::optional<Instruction *> instCombineSVEVectorAdd(InstCombiner &IC,
2427 IntrinsicInst &II) {
2428 if (auto MLA = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
2429 Intrinsic::aarch64_sve_mla>(
2430 IC, II, true))
2431 return MLA;
2432 if (auto MAD = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
2433 Intrinsic::aarch64_sve_mad>(
2434 IC, II, false))
2435 return MAD;
2436 return std::nullopt;
2437}
2438
2439static std::optional<Instruction *>
2441 if (auto FMLA =
2442 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2443 Intrinsic::aarch64_sve_fmla>(IC, II,
2444 true))
2445 return FMLA;
2446 if (auto FMAD =
2447 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2448 Intrinsic::aarch64_sve_fmad>(IC, II,
2449 false))
2450 return FMAD;
2451 if (auto FMLA =
2452 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
2453 Intrinsic::aarch64_sve_fmla>(IC, II,
2454 true))
2455 return FMLA;
2456 return std::nullopt;
2457}
2458
2459static std::optional<Instruction *>
2461 if (auto FMLA =
2462 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2463 Intrinsic::aarch64_sve_fmla>(IC, II,
2464 true))
2465 return FMLA;
2466 if (auto FMAD =
2467 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2468 Intrinsic::aarch64_sve_fmad>(IC, II,
2469 false))
2470 return FMAD;
2471 if (auto FMLA_U =
2472 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
2473 Intrinsic::aarch64_sve_fmla_u>(
2474 IC, II, true))
2475 return FMLA_U;
2476 return instCombineSVEVectorBinOp(IC, II);
2477}
2478
2479static std::optional<Instruction *>
2481 if (auto FMLS =
2482 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2483 Intrinsic::aarch64_sve_fmls>(IC, II,
2484 true))
2485 return FMLS;
2486 if (auto FMSB =
2487 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2488 Intrinsic::aarch64_sve_fnmsb>(
2489 IC, II, false))
2490 return FMSB;
2491 if (auto FMLS =
2492 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
2493 Intrinsic::aarch64_sve_fmls>(IC, II,
2494 true))
2495 return FMLS;
2496 return std::nullopt;
2497}
2498
2499static std::optional<Instruction *>
2501 if (auto FMLS =
2502 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2503 Intrinsic::aarch64_sve_fmls>(IC, II,
2504 true))
2505 return FMLS;
2506 if (auto FMSB =
2507 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
2508 Intrinsic::aarch64_sve_fnmsb>(
2509 IC, II, false))
2510 return FMSB;
2511 if (auto FMLS_U =
2512 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
2513 Intrinsic::aarch64_sve_fmls_u>(
2514 IC, II, true))
2515 return FMLS_U;
2516 return instCombineSVEVectorBinOp(IC, II);
2517}
2518
2519static std::optional<Instruction *> instCombineSVEVectorSub(InstCombiner &IC,
2520 IntrinsicInst &II) {
2521 if (auto MLS = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
2522 Intrinsic::aarch64_sve_mls>(
2523 IC, II, true))
2524 return MLS;
2525 return std::nullopt;
2526}
2527
2528static std::optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
2529 IntrinsicInst &II) {
2530 Value *UnpackArg = II.getArgOperand(0);
2531 auto *RetTy = cast<ScalableVectorType>(II.getType());
2532 bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
2533 II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;
2534
2535 // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
2536 // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
2537 if (auto *ScalarArg = getSplatValue(UnpackArg)) {
2538 ScalarArg =
2539 IC.Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
2540 Value *NewVal =
2541 IC.Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
2542 NewVal->takeName(&II);
2543 return IC.replaceInstUsesWith(II, NewVal);
2544 }
2545
2546 return std::nullopt;
2547}
2548static std::optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
2549 IntrinsicInst &II) {
2550 auto *OpVal = II.getOperand(0);
2551 auto *OpIndices = II.getOperand(1);
2552 VectorType *VTy = cast<VectorType>(II.getType());
2553
2554 // Check whether OpIndices is a constant splat value < minimal element count
2555 // of result.
2556 auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
2557 if (!SplatValue ||
2558 SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
2559 return std::nullopt;
2560
2561 // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to
2562 // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
2563 auto *Extract = IC.Builder.CreateExtractElement(OpVal, SplatValue);
2564 auto *VectorSplat =
2565 IC.Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
2566
2567 VectorSplat->takeName(&II);
2568 return IC.replaceInstUsesWith(II, VectorSplat);
2569}
2570
2571static std::optional<Instruction *> instCombineSVEUzp1(InstCombiner &IC,
2572 IntrinsicInst &II) {
2573 Value *A, *B;
2574 Type *RetTy = II.getType();
2575 constexpr Intrinsic::ID FromSVB = Intrinsic::aarch64_sve_convert_from_svbool;
2576 constexpr Intrinsic::ID ToSVB = Intrinsic::aarch64_sve_convert_to_svbool;
2577
2578 // uzp1(to_svbool(A), to_svbool(B)) --> <A, B>
2579 // uzp1(from_svbool(to_svbool(A)), from_svbool(to_svbool(B))) --> <A, B>
2580 if ((match(II.getArgOperand(0),
2582 match(II.getArgOperand(1),
2584 (match(II.getArgOperand(0), m_Intrinsic<ToSVB>(m_Value(A))) &&
2585 match(II.getArgOperand(1), m_Intrinsic<ToSVB>(m_Value(B))))) {
2586 auto *TyA = cast<ScalableVectorType>(A->getType());
2587 if (TyA == B->getType() &&
2589 auto *SubVec = IC.Builder.CreateInsertVector(
2590 RetTy, PoisonValue::get(RetTy), A, uint64_t(0));
2591 auto *ConcatVec = IC.Builder.CreateInsertVector(RetTy, SubVec, B,
2592 TyA->getMinNumElements());
2593 ConcatVec->takeName(&II);
2594 return IC.replaceInstUsesWith(II, ConcatVec);
2595 }
2596 }
2597
2598 return std::nullopt;
2599}
2600
2601static std::optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
2602 IntrinsicInst &II) {
2603 // zip1(uzp1(A, B), uzp2(A, B)) --> A
2604 // zip2(uzp1(A, B), uzp2(A, B)) --> B
2605 Value *A, *B;
2606 if (match(II.getArgOperand(0),
2609 m_Specific(A), m_Specific(B))))
2610 return IC.replaceInstUsesWith(
2611 II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B));
2612
2613 return std::nullopt;
2614}
2615
2616static std::optional<Instruction *>
2618 Value *Mask = II.getOperand(0);
2619 Value *BasePtr = II.getOperand(1);
2620 Value *Index = II.getOperand(2);
2621 Type *Ty = II.getType();
2622 Value *PassThru = ConstantAggregateZero::get(Ty);
2623
2624 // Contiguous gather => masked load.
2625 // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1))
2626 // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer)
2627 Value *IndexBase;
2629 m_Value(IndexBase), m_SpecificInt(1)))) {
2630 Align Alignment =
2631 BasePtr->getPointerAlignment(II.getDataLayout());
2632
2633 Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
2634 BasePtr, IndexBase);
2635 CallInst *MaskedLoad =
2636 IC.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
2637 MaskedLoad->takeName(&II);
2638 return IC.replaceInstUsesWith(II, MaskedLoad);
2639 }
2640
2641 return std::nullopt;
2642}
2643
2644static std::optional<Instruction *>
2646 Value *Val = II.getOperand(0);
2647 Value *Mask = II.getOperand(1);
2648 Value *BasePtr = II.getOperand(2);
2649 Value *Index = II.getOperand(3);
2650 Type *Ty = Val->getType();
2651
2652 // Contiguous scatter => masked store.
2653 // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
2654 // => (masked.store Value (gep BasePtr IndexBase) Align Mask)
2655 Value *IndexBase;
2657 m_Value(IndexBase), m_SpecificInt(1)))) {
2658 Align Alignment =
2659 BasePtr->getPointerAlignment(II.getDataLayout());
2660
2661 Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
2662 BasePtr, IndexBase);
2663 (void)IC.Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask);
2664
2665 return IC.eraseInstFromFunction(II);
2666 }
2667
2668 return std::nullopt;
2669}
2670
// Fold sve.sdiv by a constant splat divisor into cheaper operations:
// shift-based forms for (negated) powers of two; divisors of 1/-1 are
// handled specially.
static std::optional<Instruction *> instCombineSVESDIV(InstCombiner &IC,
                                                       IntrinsicInst &II) {
  // NOTE(review): a line between the signature and Pred (likely the
  // declaration of Int32Ty used below) is not visible in this chunk.
  Value *Pred = II.getOperand(0);
  Value *Vec = II.getOperand(1);
  Value *DivVec = II.getOperand(2);

  // Only handle divisors that are a splat of a compile-time integer constant.
  Value *SplatValue = getSplatValue(DivVec);
  ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue);
  if (!SplatConstantInt)
    return std::nullopt;

  APInt Divisor = SplatConstantInt->getValue();
  const int64_t DivisorValue = Divisor.getSExtValue();
  if (DivisorValue == -1)
    return std::nullopt;
  // NOTE(review): no `return` after this replace — for a divisor of 1 control
  // falls through into the isPowerOf2 path below; confirm this is intended.
  if (DivisorValue == 1)
    IC.replaceInstUsesWith(II, Vec);

  // x sdiv (2^k)  ==>  sve.asrd(x, k).
  if (Divisor.isPowerOf2()) {
    Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
    auto ASRD = IC.Builder.CreateIntrinsic(
        Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
    return IC.replaceInstUsesWith(II, ASRD);
  }
  // x sdiv (-2^k)  ==>  sve.neg(sve.asrd(x, k)).
  if (Divisor.isNegatedPowerOf2()) {
    Divisor.negate();
    Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
    auto ASRD = IC.Builder.CreateIntrinsic(
        Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
    auto NEG = IC.Builder.CreateIntrinsic(
        Intrinsic::aarch64_sve_neg, {ASRD->getType()}, {ASRD, Pred, ASRD});
    return IC.replaceInstUsesWith(II, NEG);
  }

  return std::nullopt;
}
2708
2709bool SimplifyValuePattern(SmallVector<Value *> &Vec, bool AllowPoison) {
2710 size_t VecSize = Vec.size();
2711 if (VecSize == 1)
2712 return true;
2713 if (!isPowerOf2_64(VecSize))
2714 return false;
2715 size_t HalfVecSize = VecSize / 2;
2716
2717 for (auto LHS = Vec.begin(), RHS = Vec.begin() + HalfVecSize;
2718 RHS != Vec.end(); LHS++, RHS++) {
2719 if (*LHS != nullptr && *RHS != nullptr) {
2720 if (*LHS == *RHS)
2721 continue;
2722 else
2723 return false;
2724 }
2725 if (!AllowPoison)
2726 return false;
2727 if (*LHS == nullptr && *RHS != nullptr)
2728 *LHS = *RHS;
2729 }
2730
2731 Vec.resize(HalfVecSize);
2732 SimplifyValuePattern(Vec, AllowPoison);
2733 return true;
2734}
2735
2736// Try to simplify dupqlane patterns like dupqlane(f32 A, f32 B, f32 A, f32 B)
2737// to dupqlane(f64(C)) where C is A concatenated with B
static std::optional<Instruction *> instCombineSVEDupqLane(InstCombiner &IC,
                                                           IntrinsicInst &II) {
  // Peel apart the insertelement chain feeding the dupq_lane operand.
  // NOTE(review): one line of this match expression (the matcher naming the
  // wrapping intrinsic) is not visible in this chunk.
  Value *CurrentInsertElt = nullptr, *Default = nullptr;
  if (!match(II.getOperand(0),
                 m_Value(Default), m_Value(CurrentInsertElt), m_Value())) ||
      !isa<FixedVectorType>(CurrentInsertElt->getType()))
    return std::nullopt;
  auto IIScalableTy = cast<ScalableVectorType>(II.getType());

  // Insert the scalars into a container ordered by InsertElement index
  SmallVector<Value *> Elts(IIScalableTy->getMinNumElements(), nullptr);
  while (auto InsertElt = dyn_cast<InsertElementInst>(CurrentInsertElt)) {
    auto Idx = cast<ConstantInt>(InsertElt->getOperand(2));
    Elts[Idx->getValue().getZExtValue()] = InsertElt->getOperand(1);
    CurrentInsertElt = InsertElt->getOperand(0);
  }

  // Poison lanes may only be merged when both the chain base and the default
  // value are poison.
  bool AllowPoison =
      isa<PoisonValue>(CurrentInsertElt) && isa<PoisonValue>(Default);
  if (!SimplifyValuePattern(Elts, AllowPoison))
    return std::nullopt;

  // Rebuild the simplified chain of InsertElements. e.g. (a, b, a, b) as (a, b)
  Value *InsertEltChain = PoisonValue::get(CurrentInsertElt->getType());
  for (size_t I = 0; I < Elts.size(); I++) {
    if (Elts[I] == nullptr)
      continue;
    InsertEltChain = IC.Builder.CreateInsertElement(InsertEltChain, Elts[I],
                                                    IC.Builder.getInt64(I));
  }
  if (InsertEltChain == nullptr)
    return std::nullopt;

  // Splat the simplified sequence, e.g. (f16 a, f16 b, f16 c, f16 d) as one i64
  // value or (f16 a, f16 b) as one i32 value. This requires an InsertSubvector
  // be bitcast to a type wide enough to fit the sequence, be splatted, and then
  // be narrowed back to the original type.
  unsigned PatternWidth = IIScalableTy->getScalarSizeInBits() * Elts.size();
  unsigned PatternElementCount = IIScalableTy->getScalarSizeInBits() *
                                 IIScalableTy->getMinNumElements() /
                                 PatternWidth;

  IntegerType *WideTy = IC.Builder.getIntNTy(PatternWidth);
  auto *WideScalableTy = ScalableVectorType::get(WideTy, PatternElementCount);
  auto *WideShuffleMaskTy =
      ScalableVectorType::get(IC.Builder.getInt32Ty(), PatternElementCount);

  // Zero shuffle mask == splat lane 0 of the widened vector.
  auto InsertSubvector = IC.Builder.CreateInsertVector(
      II.getType(), PoisonValue::get(II.getType()), InsertEltChain,
      uint64_t(0));
  auto WideBitcast =
      IC.Builder.CreateBitOrPointerCast(InsertSubvector, WideScalableTy);
  auto WideShuffleMask = ConstantAggregateZero::get(WideShuffleMaskTy);
  auto WideShuffle = IC.Builder.CreateShuffleVector(
      WideBitcast, PoisonValue::get(WideScalableTy), WideShuffleMask);
  auto NarrowBitcast =
      IC.Builder.CreateBitOrPointerCast(WideShuffle, II.getType());

  return IC.replaceInstUsesWith(II, NarrowBitcast);
}
2799
2800static std::optional<Instruction *> instCombineMaxMinNM(InstCombiner &IC,
2801 IntrinsicInst &II) {
2802 Value *A = II.getArgOperand(0);
2803 Value *B = II.getArgOperand(1);
2804 if (A == B)
2805 return IC.replaceInstUsesWith(II, A);
2806
2807 return std::nullopt;
2808}
2809
// Fold sve.srshl into the cheaper sve.lsl when its input is known
// non-negative (fed by an ABS) and the shift amount is non-negative, so the
// rounding behaviour of SRSHL can never be observed.
static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
                                                        IntrinsicInst &II) {
  Value *Pred = II.getOperand(0);
  Value *Vec = II.getOperand(1);
  Value *Shift = II.getOperand(2);

  // Convert SRSHL into the simpler LSL intrinsic when fed by an ABS intrinsic.
  // NOTE(review): the two m_Intrinsic matcher lines for the abs patterns are
  // not visible in this chunk.
  Value *AbsPred, *MergedValue;
               m_Value(MergedValue), m_Value(AbsPred), m_Value())) &&
                 m_Value(MergedValue), m_Value(AbsPred), m_Value())))

    return std::nullopt;

  // Transform is valid if any of the following are true:
  // * The ABS merge value is an undef or non-negative
  // * The ABS predicate is all active
  // * The ABS predicate and the SRSHL predicates are the same
  if (!isa<UndefValue>(MergedValue) && !match(MergedValue, m_NonNegative()) &&
      AbsPred != Pred && !isAllActivePredicate(AbsPred))
    return std::nullopt;

  // Only valid when the shift amount is non-negative, otherwise the rounding
  // behaviour of SRSHL cannot be ignored.
  if (!match(Shift, m_NonNegative()))
    return std::nullopt;

  auto LSL = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_lsl,
                                        {II.getType()}, {Pred, Vec, Shift});

  return IC.replaceInstUsesWith(II, LSL);
}
2843
2844static std::optional<Instruction *> instCombineSVEInsr(InstCombiner &IC,
2845 IntrinsicInst &II) {
2846 Value *Vec = II.getOperand(0);
2847
2848 if (getSplatValue(Vec) == II.getOperand(1))
2849 return IC.replaceInstUsesWith(II, Vec);
2850
2851 return std::nullopt;
2852}
2853
2854static std::optional<Instruction *> instCombineDMB(InstCombiner &IC,
2855 IntrinsicInst &II) {
2856 // If this barrier is post-dominated by identical one we can remove it
2857 auto *NI = II.getNextNode();
2858 unsigned LookaheadThreshold = DMBLookaheadThreshold;
2859 auto CanSkipOver = [](Instruction *I) {
2860 return !I->mayReadOrWriteMemory() && !I->mayHaveSideEffects();
2861 };
2862 while (LookaheadThreshold-- && CanSkipOver(NI)) {
2863 auto *NIBB = NI->getParent();
2864 NI = NI->getNextNode();
2865 if (!NI) {
2866 if (auto *SuccBB = NIBB->getUniqueSuccessor())
2867 NI = &*SuccBB->getFirstNonPHIOrDbgOrLifetime();
2868 else
2869 break;
2870 }
2871 }
2872 auto *NextII = dyn_cast_or_null<IntrinsicInst>(NI);
2873 if (NextII && II.isIdenticalTo(NextII))
2874 return IC.eraseInstFromFunction(II);
2875
2876 return std::nullopt;
2877}
2878
2879static std::optional<Instruction *> instCombineWhilelo(InstCombiner &IC,
2880 IntrinsicInst &II) {
2881 return IC.replaceInstUsesWith(
2882 II,
2883 IC.Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
2884 {II.getType(), II.getOperand(0)->getType()},
2885 {II.getOperand(0), II.getOperand(1)}));
2886}
2887
static std::optional<Instruction *> instCombinePTrue(InstCombiner &IC,
                                                     IntrinsicInst &II) {
  // NOTE(review): the guarding `if` condition (presumably matching the
  // all-elements ptrue pattern operand) is not visible in this chunk.
  // An all-lanes-true predicate is just an all-ones vector of the result type.
    return IC.replaceInstUsesWith(II, Constant::getAllOnesValue(II.getType()));
  return std::nullopt;
}
2894
// Fold sve.uxt[bhw] (zero-extend of the low NumBits of each lane) into an
// AND with a low-bits mask, when the passthru lanes can never be observed.
// NOTE(review): one parameter line of this signature (the IntrinsicInst &II
// argument used below) is not visible in this chunk.
static std::optional<Instruction *> instCombineSVEUxt(InstCombiner &IC,
                                                      unsigned NumBits) {
  Value *Passthru = II.getOperand(0);
  Value *Pg = II.getOperand(1);
  Value *Op = II.getOperand(2);

  // Convert UXT[BHW] to AND.
  // Safe because either the passthru is undef or the predicate covers every
  // lane, so the masked-off lanes of the AND result don't matter.
  if (isa<UndefValue>(Passthru) || isAllActivePredicate(Pg)) {
    auto *Ty = cast<VectorType>(II.getType());
    auto MaskValue = APInt::getLowBitsSet(Ty->getScalarSizeInBits(), NumBits);
    auto *Mask = ConstantInt::get(Ty, MaskValue);
    auto *And = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_and_u, {Ty},
                                           {Pg, Op, Mask});
    return IC.replaceInstUsesWith(II, And);
  }

  return std::nullopt;
}
2914
// Fold aarch64.sme.in.streaming.mode to a constant when the enclosing
// function's SME attributes pin the streaming state: a streaming function is
// always true, and a function that is neither streaming nor
// streaming-compatible is always false.
// NOTE(review): the line naming this function and its parameters is not
// visible in this chunk.
static std::optional<Instruction *>
  SMEAttrs FnSMEAttrs(*II.getFunction());
  bool IsStreaming = FnSMEAttrs.hasStreamingInterfaceOrBody();
  if (IsStreaming || !FnSMEAttrs.hasStreamingCompatibleInterface())
    return IC.replaceInstUsesWith(
        II, ConstantInt::getBool(II.getType(), IsStreaming));
  return std::nullopt;
}
2924
/// AArch64-specific InstCombine hook: first try the generic SVE intrinsic
/// simplifier, then dispatch on intrinsic ID to a dedicated folding helper.
/// Returning std::nullopt leaves the instruction untouched.
/// NOTE(review): the line naming this member function and the line
/// initializing IInfo are not visible in this chunk.
std::optional<Instruction *>
                                     IntrinsicInst &II) const {
  if (std::optional<Instruction *> I = simplifySVEIntrinsic(IC, II, IInfo))
    return I;

  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::aarch64_dmb:
    return instCombineDMB(IC, II);
  case Intrinsic::aarch64_neon_fmaxnm:
  case Intrinsic::aarch64_neon_fminnm:
    return instCombineMaxMinNM(IC, II);
  case Intrinsic::aarch64_sve_convert_from_svbool:
    return instCombineConvertFromSVBool(IC, II);
  case Intrinsic::aarch64_sve_dup:
    return instCombineSVEDup(IC, II);
  case Intrinsic::aarch64_sve_dup_x:
    return instCombineSVEDupX(IC, II);
  case Intrinsic::aarch64_sve_cmpne:
  case Intrinsic::aarch64_sve_cmpne_wide:
    return instCombineSVECmpNE(IC, II);
  case Intrinsic::aarch64_sve_rdffr:
    return instCombineRDFFR(IC, II);
  case Intrinsic::aarch64_sve_lasta:
  case Intrinsic::aarch64_sve_lastb:
    return instCombineSVELast(IC, II);
  case Intrinsic::aarch64_sve_clasta_n:
  case Intrinsic::aarch64_sve_clastb_n:
    return instCombineSVECondLast(IC, II);
  // cnt[dwhb]: the constant is the element width's lane count per 128 bits.
  case Intrinsic::aarch64_sve_cntd:
    return instCombineSVECntElts(IC, II, 2);
  case Intrinsic::aarch64_sve_cntw:
    return instCombineSVECntElts(IC, II, 4);
  case Intrinsic::aarch64_sve_cnth:
    return instCombineSVECntElts(IC, II, 8);
  case Intrinsic::aarch64_sve_cntb:
    return instCombineSVECntElts(IC, II, 16);
  case Intrinsic::aarch64_sme_cntsd:
    return instCombineSMECntsd(IC, II, ST);
  case Intrinsic::aarch64_sve_ptest_any:
  case Intrinsic::aarch64_sve_ptest_first:
  case Intrinsic::aarch64_sve_ptest_last:
    return instCombineSVEPTest(IC, II);
  case Intrinsic::aarch64_sve_fadd:
    return instCombineSVEVectorFAdd(IC, II);
  case Intrinsic::aarch64_sve_fadd_u:
    return instCombineSVEVectorFAddU(IC, II);
  case Intrinsic::aarch64_sve_fmul_u:
    return instCombineSVEVectorBinOp(IC, II);
  case Intrinsic::aarch64_sve_fsub:
    return instCombineSVEVectorFSub(IC, II);
  case Intrinsic::aarch64_sve_fsub_u:
    return instCombineSVEVectorFSubU(IC, II);
  case Intrinsic::aarch64_sve_add:
    return instCombineSVEVectorAdd(IC, II);
  // add_u/sub_u: try to fuse a feeding mul_u into mla_u/mls_u.
  case Intrinsic::aarch64_sve_add_u:
    return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u,
                                             Intrinsic::aarch64_sve_mla_u>(
        IC, II, true);
  case Intrinsic::aarch64_sve_sub:
    return instCombineSVEVectorSub(IC, II);
  case Intrinsic::aarch64_sve_sub_u:
    return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u,
                                             Intrinsic::aarch64_sve_mls_u>(
        IC, II, true);
  case Intrinsic::aarch64_sve_tbl:
    return instCombineSVETBL(IC, II);
  case Intrinsic::aarch64_sve_uunpkhi:
  case Intrinsic::aarch64_sve_uunpklo:
  case Intrinsic::aarch64_sve_sunpkhi:
  case Intrinsic::aarch64_sve_sunpklo:
    return instCombineSVEUnpack(IC, II);
  case Intrinsic::aarch64_sve_uzp1:
    return instCombineSVEUzp1(IC, II);
  case Intrinsic::aarch64_sve_zip1:
  case Intrinsic::aarch64_sve_zip2:
    return instCombineSVEZip(IC, II);
  case Intrinsic::aarch64_sve_ld1_gather_index:
    return instCombineLD1GatherIndex(IC, II);
  case Intrinsic::aarch64_sve_st1_scatter_index:
    return instCombineST1ScatterIndex(IC, II);
  case Intrinsic::aarch64_sve_ld1:
    return instCombineSVELD1(IC, II, DL);
  case Intrinsic::aarch64_sve_st1:
    return instCombineSVEST1(IC, II, DL);
  case Intrinsic::aarch64_sve_sdiv:
    return instCombineSVESDIV(IC, II);
  case Intrinsic::aarch64_sve_sel:
    return instCombineSVESel(IC, II);
  case Intrinsic::aarch64_sve_srshl:
    return instCombineSVESrshl(IC, II);
  case Intrinsic::aarch64_sve_dupq_lane:
    return instCombineSVEDupqLane(IC, II);
  case Intrinsic::aarch64_sve_insr:
    return instCombineSVEInsr(IC, II);
  case Intrinsic::aarch64_sve_whilelo:
    return instCombineWhilelo(IC, II);
  case Intrinsic::aarch64_sve_ptrue:
    return instCombinePTrue(IC, II);
  // uxt[bhw]: the constant is the number of low bits kept by the extend.
  case Intrinsic::aarch64_sve_uxtb:
    return instCombineSVEUxt(IC, II, 8);
  case Intrinsic::aarch64_sve_uxth:
    return instCombineSVEUxt(IC, II, 16);
  case Intrinsic::aarch64_sve_uxtw:
    return instCombineSVEUxt(IC, II, 32);
  case Intrinsic::aarch64_sme_in_streaming_mode:
    return instCombineInStreamingMode(IC, II);
  }

  return std::nullopt;
}
3040
    InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
    APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  // For the NEON narrowing intrinsics below, result lanes correspond 1:1 to
  // lanes of operand 0, so the demanded-elements mask can be pushed straight
  // through to that operand. All other intrinsics fall back to the default.
  switch (II.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_fcvtxn:
  case Intrinsic::aarch64_neon_rshrn:
  case Intrinsic::aarch64_neon_sqrshrn:
  case Intrinsic::aarch64_neon_sqrshrun:
  case Intrinsic::aarch64_neon_sqshrn:
  case Intrinsic::aarch64_neon_sqshrun:
  case Intrinsic::aarch64_neon_sqxtn:
  case Intrinsic::aarch64_neon_sqxtun:
  case Intrinsic::aarch64_neon_uqrshrn:
  case Intrinsic::aarch64_neon_uqshrn:
  case Intrinsic::aarch64_neon_uqxtn:
    SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts);
    break;
  }

  // std::nullopt: no direct replacement value; any simplification happened
  // through SimplifyAndSetOp above.
  return std::nullopt;
}
3066
  // NOTE(review): the enclosing function's signature and the second operand
  // of this && (presumably a streaming-mode autovec flag) are not visible in
  // this chunk. As written it reports availability of SVE, or of
  // streaming-compatible SVE gated by that flag.
  return ST->isSVEAvailable() || (ST->isSVEorStreamingSVEAvailable() &&
}
3071
  // NOTE(review): the enclosing signature and the case labels of this switch
  // (the register-kind enumerators) are not visible in this chunk.
  switch (K) {
    // Scalar registers are 64 bits wide.
    return TypeSize::getFixed(64);
    // Fixed-width vectors: prefer the configured minimum SVE width (at least
    // 128) when SVE for fixed-length vectors is enabled and usable, else fall
    // back to NEON's 128 bits, else report no vector registers.
    if (ST->useSVEForFixedLengthVectors() &&
        (ST->isSVEAvailable() || EnableFixedwidthAutovecInStreamingMode))
      return TypeSize::getFixed(
          std::max(ST->getMinSVEVectorSizeInBits(), 128u));
    else if (ST->isNeonAvailable())
      return TypeSize::getFixed(128);
    else
      return TypeSize::getFixed(0);
    // Scalable vectors: 128-bit granule when SVE (or streaming SVE, gated by
    // a flag whose line is not visible here) is available.
    if (ST->isSVEAvailable() || (ST->isSVEorStreamingSVEAvailable() &&
      return TypeSize::getScalable(128);
    else
      return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}
3095
/// Return true if an add/sub with the given operands maps onto a NEON
/// widening instruction where one operand is extended for free (UADDW/SADDW,
/// USUBW/SSUBW per the case comments below).
/// \param Opcode       the binary operator's IR opcode.
/// \param DstTy        the operation's (vector) result type.
/// \param Args         the two operand values of the binary operator.
/// \param SrcOverrideTy optional pre-extend source type to use instead of
///        deriving it from the extend instruction's operand.
bool AArch64TTIImpl::isSingleExtWideningInstruction(
    unsigned Opcode, Type *DstTy, ArrayRef<const Value *> Args,
    Type *SrcOverrideTy) const {
  // A helper that returns a vector type from the given type. The number of
  // elements in type Ty determines the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           cast<VectorType>(DstTy)->getElementCount());
  };

  // Exit early if DstTy is not a vector type whose elements are one of [i16,
  // i32, i64]. SVE doesn't generally have the same set of instructions to
  // perform an extend with the add/sub/mul. There are SMULLB style
  // instructions, but they operate on top/bottom, requiring some sort of lane
  // interleaving to be used with zext/sext.
  unsigned DstEltSize = DstTy->getScalarSizeInBits();
  if (!useNeonVector(DstTy) || Args.size() != 2 ||
      (DstEltSize != 16 && DstEltSize != 32 && DstEltSize != 64))
    return false;

  Type *SrcTy = SrcOverrideTy;
  switch (Opcode) {
  case Instruction::Add: // UADDW(2), SADDW(2).
  case Instruction::Sub: { // USUBW(2), SSUBW(2).
    // The second operand needs to be an extend
    if (isa<SExtInst>(Args[1]) || isa<ZExtInst>(Args[1])) {
      if (!SrcTy)
        SrcTy =
            toVectorTy(cast<Instruction>(Args[1])->getOperand(0)->getType());
      break;
    }

    // Only the second operand of a sub can be folded into the instruction.
    if (Opcode == Instruction::Sub)
      return false;

    // UADDW(2), SADDW(2) can be commutted.
    if (isa<SExtInst>(Args[0]) || isa<ZExtInst>(Args[0])) {
      if (!SrcTy)
        SrcTy =
            toVectorTy(cast<Instruction>(Args[0])->getOperand(0)->getType());
      break;
    }
    return false;
  }
  default:
    return false;
  }

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = getTypeLegalizationCost(DstTy);
  if (!DstTyL.second.isVector() || DstEltSize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  assert(SrcTy && "Expected some SrcTy");
  auto SrcTyL = getTypeLegalizationCost(SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  InstructionCost NumDstEls =
      DstTyL.first * DstTyL.second.getVectorMinNumElements();
  InstructionCost NumSrcEls =
      SrcTyL.first * SrcTyL.second.getVectorMinNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstEltSize;
}
3168
/// If an add/sub/mul whose both operands are extends maps onto a NEON
/// widening instruction (making both extends free), return the widened
/// source-side type the operation effectively computes in; otherwise return
/// nullptr.
/// NOTE(review): one parameter line of this signature (the Args operand list
/// used below) is not visible in this chunk.
Type *AArch64TTIImpl::isBinExtWideningInstruction(unsigned Opcode, Type *DstTy,
                                                  Type *SrcOverrideTy) const {
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub &&
      Opcode != Instruction::Mul)
    return nullptr;

  // Exit early if DstTy is not a vector type whose elements are one of [i16,
  // i32, i64]. SVE doesn't generally have the same set of instructions to
  // perform an extend with the add/sub/mul. There are SMULLB style
  // instructions, but they operate on top/bottom, requiring some sort of lane
  // interleaving to be used with zext/sext.
  unsigned DstEltSize = DstTy->getScalarSizeInBits();
  if (!useNeonVector(DstTy) || Args.size() != 2 ||
      (DstEltSize != 16 && DstEltSize != 32 && DstEltSize != 64))
    return nullptr;

  // Pre-extend scalar width of an operand, honouring the override when given.
  auto getScalarSizeWithOverride = [&](const Value *V) {
    if (SrcOverrideTy)
      return SrcOverrideTy->getScalarSizeInBits();
    return cast<Instruction>(V)
        ->getOperand(0)
        ->getType()
        ->getScalarSizeInBits();
  };

  unsigned MaxEltSize = 0;
  if ((isa<SExtInst>(Args[0]) && isa<SExtInst>(Args[1])) ||
      (isa<ZExtInst>(Args[0]) && isa<ZExtInst>(Args[1]))) {
    // Same-signedness extends: widen from the larger of the two sources.
    unsigned EltSize0 = getScalarSizeWithOverride(Args[0]);
    unsigned EltSize1 = getScalarSizeWithOverride(Args[1]);
    MaxEltSize = std::max(EltSize0, EltSize1);
  } else if (isa<SExtInst, ZExtInst>(Args[0]) &&
             isa<SExtInst, ZExtInst>(Args[1])) {
    unsigned EltSize0 = getScalarSizeWithOverride(Args[0]);
    unsigned EltSize1 = getScalarSizeWithOverride(Args[1]);
    // mul(sext, zext) will become smull(sext, zext) if the extends are large
    // enough.
    if (EltSize0 >= DstEltSize / 2 || EltSize1 >= DstEltSize / 2)
      return nullptr;
    MaxEltSize = DstEltSize / 2;
  } else if (Opcode == Instruction::Mul &&
             (isa<ZExtInst>(Args[0]) || isa<ZExtInst>(Args[1]))) {
    // If one of the operands is a Zext and the other has enough zero bits
    // to be treated as unsigned, we can still generate a umull, meaning the
    // zext is free.
    KnownBits Known =
        computeKnownBits(isa<ZExtInst>(Args[0]) ? Args[1] : Args[0], DL);
    if (Args[0]->getType()->getScalarSizeInBits() -
            Known.Zero.countLeadingOnes() >
        DstTy->getScalarSizeInBits() / 2)
      return nullptr;

    MaxEltSize =
        getScalarSizeWithOverride(isa<ZExtInst>(Args[0]) ? Args[0] : Args[1]);
  } else
    return nullptr;

  // The widening instruction only doubles the element size; wider gaps can't
  // be bridged in one step.
  if (MaxEltSize * 2 > DstEltSize)
    return nullptr;

  Type *ExtTy = DstTy->getWithNewBitWidth(MaxEltSize * 2);
  if (ExtTy->getPrimitiveSizeInBits() <= 64)
    return nullptr;
  return ExtTy;
}
3235
3236// s/urhadd instructions implement the following pattern, making the
3237// extends free:
3238// %x = add ((zext i8 -> i16), 1)
3239// %y = (zext i8 -> i16)
3240// trunc i16 (lshr (add %x, %y), 1) -> i8
3241//
                                      Type *Src) const {
  // NOTE(review): the first line of this signature (the function name with
  // the ExtUser and Dst parameters) is not visible in this chunk.
  // The source should be a legal vector type.
  if (!Src->isVectorTy() || !TLI->isTypeLegal(TLI->getValueType(DL, Src)) ||
      (Src->isScalableTy() && !ST->hasSVE2()))
    return false;

  // The extend must feed exactly one add (the "+1" / "+other" of the pattern).
  if (ExtUser->getOpcode() != Instruction::Add || !ExtUser->hasOneUse())
    return false;

  // Look for trunc/shl/add before trying to match the pattern.
  const Instruction *Add = ExtUser;
  auto *AddUser =
      dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser());
  if (AddUser && AddUser->getOpcode() == Instruction::Add)
    Add = AddUser;

  auto *Shr = dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser());
  if (!Shr || Shr->getOpcode() != Instruction::LShr)
    return false;

  // The trunc must narrow back to the original pre-extend element width.
  auto *Trunc = dyn_cast_or_null<Instruction>(Shr->getUniqueUndroppableUser());
  if (!Trunc || Trunc->getOpcode() != Instruction::Trunc ||
      Src->getScalarSizeInBits() !=
          cast<CastInst>(Trunc)->getDestTy()->getScalarSizeInBits())
    return false;

  // Try to match the whole pattern. Ext could be either the first or second
  // m_ZExtOrSExt matched.
  Instruction *Ex1, *Ex2;
  if (!(match(Add, m_c_Add(m_Instruction(Ex1),
                           m_c_Add(m_Instruction(Ex2), m_SpecificInt(1))))))
    return false;

  // Ensure both extends are of the same type
  if (match(Ex1, m_ZExtOrSExt(m_Value())) &&
      Ex1->getOpcode() == Ex2->getOpcode())
    return true;

  return false;
}
3283
3285 Type *Src,
3288 const Instruction *I) const {
3289 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3290 assert(ISD && "Invalid opcode");
3291 // If the cast is observable, and it is used by a widening instruction (e.g.,
3292 // uaddl, saddw, etc.), it may be free.
3293 if (I && I->hasOneUser()) {
3294 auto *SingleUser = cast<Instruction>(*I->user_begin());
3295 SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
3296 if (Type *ExtTy = isBinExtWideningInstruction(
3297 SingleUser->getOpcode(), Dst, Operands,
3298 Src != I->getOperand(0)->getType() ? Src : nullptr)) {
3299 // The cost from Src->Src*2 needs to be added if required, the cost from
3300 // Src*2->ExtTy is free.
3301 if (ExtTy->getScalarSizeInBits() > Src->getScalarSizeInBits() * 2) {
3302 Type *DoubleSrcTy =
3303 Src->getWithNewBitWidth(Src->getScalarSizeInBits() * 2);
3304 return getCastInstrCost(Opcode, DoubleSrcTy, Src,
3306 }
3307
3308 return 0;
3309 }
3310
3311 if (isSingleExtWideningInstruction(
3312 SingleUser->getOpcode(), Dst, Operands,
3313 Src != I->getOperand(0)->getType() ? Src : nullptr)) {
3314 // For adds only count the second operand as free if both operands are
3315 // extends but not the same operation. (i.e both operands are not free in
3316 // add(sext, zext)).
3317 if (SingleUser->getOpcode() == Instruction::Add) {
3318 if (I == SingleUser->getOperand(1) ||
3319 (isa<CastInst>(SingleUser->getOperand(1)) &&
3320 cast<CastInst>(SingleUser->getOperand(1))->getOpcode() == Opcode))
3321 return 0;
3322 } else {
3323 // Others are free so long as isSingleExtWideningInstruction
3324 // returned true.
3325 return 0;
3326 }
3327 }
3328
3329 // The cast will be free for the s/urhadd instructions
3330 if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
3331 isExtPartOfAvgExpr(SingleUser, Dst, Src))
3332 return 0;
3333 }
3334
3335 EVT SrcTy = TLI->getValueType(DL, Src);
3336 EVT DstTy = TLI->getValueType(DL, Dst);
3337
3338 if (!SrcTy.isSimple() || !DstTy.isSimple())
3339 return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
3340
3341 // For the moment we do not have lowering for SVE1-only fptrunc f64->bf16 as
3342 // we use fcvtx under SVE2. Give them invalid costs.
3343 if (!ST->hasSVE2() && !ST->isStreamingSVEAvailable() &&
3344 ISD == ISD::FP_ROUND && SrcTy.isScalableVector() &&
3345 DstTy.getScalarType() == MVT::bf16 && SrcTy.getScalarType() == MVT::f64)
3347
3348 static const TypeConversionCostTblEntry BF16Tbl[] = {
3349 {ISD::FP_ROUND, MVT::bf16, MVT::f32, 1}, // bfcvt
3350 {ISD::FP_ROUND, MVT::bf16, MVT::f64, 1}, // bfcvt
3351 {ISD::FP_ROUND, MVT::v4bf16, MVT::v4f32, 1}, // bfcvtn
3352 {ISD::FP_ROUND, MVT::v8bf16, MVT::v8f32, 2}, // bfcvtn+bfcvtn2
3353 {ISD::FP_ROUND, MVT::v2bf16, MVT::v2f64, 2}, // bfcvtn+fcvtn
3354 {ISD::FP_ROUND, MVT::v4bf16, MVT::v4f64, 3}, // fcvtn+fcvtl2+bfcvtn
3355 {ISD::FP_ROUND, MVT::v8bf16, MVT::v8f64, 6}, // 2 * fcvtn+fcvtn2+bfcvtn
3356 {ISD::FP_ROUND, MVT::nxv2bf16, MVT::nxv2f32, 1}, // bfcvt
3357 {ISD::FP_ROUND, MVT::nxv4bf16, MVT::nxv4f32, 1}, // bfcvt
3358 {ISD::FP_ROUND, MVT::nxv8bf16, MVT::nxv8f32, 3}, // bfcvt+bfcvt+uzp1
3359 {ISD::FP_ROUND, MVT::nxv2bf16, MVT::nxv2f64, 2}, // fcvtx+bfcvt
3360 {ISD::FP_ROUND, MVT::nxv4bf16, MVT::nxv4f64, 5}, // 2*fcvtx+2*bfcvt+uzp1
3361 {ISD::FP_ROUND, MVT::nxv8bf16, MVT::nxv8f64, 11}, // 4*fcvt+4*bfcvt+3*uzp
3362 };
3363
3364 if (ST->hasBF16())
3365 if (const auto *Entry = ConvertCostTableLookup(
3366 BF16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
3367 return Entry->Cost;
3368
3369 // Symbolic constants for the SVE sitofp/uitofp entries in the table below
3370 // The cost of unpacking twice is artificially increased for now in order
3371 // to avoid regressions against NEON, which will use tbl instructions directly
3372 // instead of multiple layers of [s|u]unpk[lo|hi].
3373 // We use the unpacks in cases where the destination type is illegal and
3374 // requires splitting of the input, even if the input type itself is legal.
3375 const unsigned int SVE_EXT_COST = 1;
3376 const unsigned int SVE_FCVT_COST = 1;
3377 const unsigned int SVE_UNPACK_ONCE = 4;
3378 const unsigned int SVE_UNPACK_TWICE = 16;
3379
3380 static const TypeConversionCostTblEntry ConversionTbl[] = {
3381 {ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1}, // xtn
3382 {ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1}, // xtn
3383 {ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1}, // xtn
3384 {ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1}, // xtn
3385 {ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 3}, // 2 xtn + 1 uzp1
3386 {ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1}, // xtn
3387 {ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2}, // 1 uzp1 + 1 xtn
3388 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1}, // 1 uzp1
3389 {ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1}, // 1 xtn
3390 {ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2}, // 1 uzp1 + 1 xtn
3391 {ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 4}, // 3 x uzp1 + xtn
3392 {ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1}, // 1 uzp1
3393 {ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 3}, // 3 x uzp1
3394 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 2}, // 2 x uzp1
3395 {ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 1}, // uzp1
3396 {ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 3}, // (2 + 1) x uzp1
3397 {ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 7}, // (4 + 2 + 1) x uzp1
3398 {ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2}, // 2 x uzp1
3399 {ISD::TRUNCATE, MVT::v16i16, MVT::v16i64, 6}, // (4 + 2) x uzp1
3400 {ISD::TRUNCATE, MVT::v16i32, MVT::v16i64, 4}, // 4 x uzp1
3401
3402 // Truncations on nxvmiN
3403 {ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i8, 2},
3404 {ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 2},
3405 {ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 2},
3406 {ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 2},
3407 {ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i8, 2},
3408 {ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 2},
3409 {ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 2},
3410 {ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 5},
3411 {ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i8, 2},
3412 {ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 2},
3413 {ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 5},
3414 {ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 11},
3415 {ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 2},
3416 {ISD::TRUNCATE, MVT::nxv2i8, MVT::nxv2i16, 0},
3417 {ISD::TRUNCATE, MVT::nxv2i8, MVT::nxv2i32, 0},
3418 {ISD::TRUNCATE, MVT::nxv2i8, MVT::nxv2i64, 0},
3419 {ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 0},
3420 {ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i64, 0},
3421 {ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 0},
3422 {ISD::TRUNCATE, MVT::nxv4i8, MVT::nxv4i16, 0},
3423 {ISD::TRUNCATE, MVT::nxv4i8, MVT::nxv4i32, 0},
3424 {ISD::TRUNCATE, MVT::nxv4i8, MVT::nxv4i64, 1},
3425 {ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 0},
3426 {ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i64, 1},
3427 {ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 1},
3428 {ISD::TRUNCATE, MVT::nxv8i8, MVT::nxv8i16, 0},
3429 {ISD::TRUNCATE, MVT::nxv8i8, MVT::nxv8i32, 1},
3430 {ISD::TRUNCATE, MVT::nxv8i8, MVT::nxv8i64, 3},
3431 {ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 1},
3432 {ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i64, 3},
3433 {ISD::TRUNCATE, MVT::nxv16i8, MVT::nxv16i16, 1},
3434 {ISD::TRUNCATE, MVT::nxv16i8, MVT::nxv16i32, 3},
3435 {ISD::TRUNCATE, MVT::nxv16i8, MVT::nxv16i64, 7},
3436
3437 // The number of shll instructions for the extension.
3438 {ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3},
3439 {ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3},
3440 {ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2},
3441 {ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2},
3442 {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3},
3443 {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3},
3444 {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2},
3445 {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2},
3446 {ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7},
3447 {ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7},
3448 {ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6},
3449 {ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6},
3450 {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2},
3451 {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2},
3452 {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6},
3453 {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6},
3454
3455 // FP Ext and trunc
3456 {ISD::FP_EXTEND, MVT::f64, MVT::f32, 1}, // fcvt
3457 {ISD::FP_EXTEND, MVT::v2f64, MVT::v2f32, 1}, // fcvtl
3458 {ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 2}, // fcvtl+fcvtl2
3459 // FP16
3460 {ISD::FP_EXTEND, MVT::f32, MVT::f16, 1}, // fcvt
3461 {ISD::FP_EXTEND, MVT::f64, MVT::f16, 1}, // fcvt
3462 {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1}, // fcvtl
3463 {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 2}, // fcvtl+fcvtl2
3464 {ISD::FP_EXTEND, MVT::v2f64, MVT::v2f16, 2}, // fcvtl+fcvtl
3465 {ISD::FP_EXTEND, MVT::v4f64, MVT::v4f16, 3}, // fcvtl+fcvtl2+fcvtl
3466 {ISD::FP_EXTEND, MVT::v8f64, MVT::v8f16, 6}, // 2 * fcvtl+fcvtl2+fcvtl
3467 // BF16 (uses shift)
3468 {ISD::FP_EXTEND, MVT::f32, MVT::bf16, 1}, // shl
3469 {ISD::FP_EXTEND, MVT::f64, MVT::bf16, 2}, // shl+fcvt
3470 {ISD::FP_EXTEND, MVT::v4f32, MVT::v4bf16, 1}, // shll
3471 {ISD::FP_EXTEND, MVT::v8f32, MVT::v8bf16, 2}, // shll+shll2
3472 {ISD::FP_EXTEND, MVT::v2f64, MVT::v2bf16, 2}, // shll+fcvtl
3473 {ISD::FP_EXTEND, MVT::v4f64, MVT::v4bf16, 3}, // shll+fcvtl+fcvtl2
3474 {ISD::FP_EXTEND, MVT::v8f64, MVT::v8bf16, 6}, // 2 * shll+fcvtl+fcvtl2
3475 // FP Ext and trunc
3476 {ISD::FP_ROUND, MVT::f32, MVT::f64, 1}, // fcvt
3477 {ISD::FP_ROUND, MVT::v2f32, MVT::v2f64, 1}, // fcvtn
3478 {ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 2}, // fcvtn+fcvtn2
3479 // FP16
3480 {ISD::FP_ROUND, MVT::f16, MVT::f32, 1}, // fcvt
3481 {ISD::FP_ROUND, MVT::f16, MVT::f64, 1}, // fcvt
3482 {ISD::FP_ROUND, MVT::v4f16, MVT::v4f32, 1}, // fcvtn
3483 {ISD::FP_ROUND, MVT::v8f16, MVT::v8f32, 2}, // fcvtn+fcvtn2
3484 {ISD::FP_ROUND, MVT::v2f16, MVT::v2f64, 2}, // fcvtn+fcvtn
3485 {ISD::FP_ROUND, MVT::v4f16, MVT::v4f64, 3}, // fcvtn+fcvtn2+fcvtn
3486 {ISD::FP_ROUND, MVT::v8f16, MVT::v8f64, 6}, // 2 * fcvtn+fcvtn2+fcvtn
3487 // BF16 (more complex, with +bf16 is handled above)
3488 {ISD::FP_ROUND, MVT::bf16, MVT::f32, 8}, // Expansion is ~8 insns
3489 {ISD::FP_ROUND, MVT::bf16, MVT::f64, 9}, // fcvtn + above
3490 {ISD::FP_ROUND, MVT::v2bf16, MVT::v2f32, 8},
3491 {ISD::FP_ROUND, MVT::v4bf16, MVT::v4f32, 8},
3492 {ISD::FP_ROUND, MVT::v8bf16, MVT::v8f32, 15},
3493 {ISD::FP_ROUND, MVT::v2bf16, MVT::v2f64, 9},
3494 {ISD::FP_ROUND, MVT::v4bf16, MVT::v4f64, 10},
3495 {ISD::FP_ROUND, MVT::v8bf16, MVT::v8f64, 19},
3496
3497 // LowerVectorINT_TO_FP:
3498 {ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1},
3499 {ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1},
3500 {ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1},
3501 {ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1},
3502 {ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1},
3503 {ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1},
3504
3505 // SVE: to nxv2f16
3506 {ISD::SINT_TO_FP, MVT::nxv2f16, MVT::nxv2i8,
3507 SVE_EXT_COST + SVE_FCVT_COST},
3508 {ISD::SINT_TO_FP, MVT::nxv2f16, MVT::nxv2i16, SVE_FCVT_COST},
3509 {ISD::SINT_TO_FP, MVT::nxv2f16, MVT::nxv2i32, SVE_FCVT_COST},
3510 {ISD::SINT_TO_FP, MVT::nxv2f16, MVT::nxv2i64, SVE_FCVT_COST},
3511 {ISD::UINT_TO_FP, MVT::nxv2f16, MVT::nxv2i8,
3512 SVE_EXT_COST + SVE_FCVT_COST},
3513 {ISD::UINT_TO_FP, MVT::nxv2f16, MVT::nxv2i16, SVE_FCVT_COST},
3514 {ISD::UINT_TO_FP, MVT::nxv2f16, MVT::nxv2i32, SVE_FCVT_COST},
3515 {ISD::UINT_TO_FP, MVT::nxv2f16, MVT::nxv2i64, SVE_FCVT_COST},
3516
3517 // SVE: to nxv4f16
3518 {ISD::SINT_TO_FP, MVT::nxv4f16, MVT::nxv4i8,
3519 SVE_EXT_COST + SVE_FCVT_COST},
3520 {ISD::SINT_TO_FP, MVT::nxv4f16, MVT::nxv4i16, SVE_FCVT_COST},
3521 {ISD::SINT_TO_FP, MVT::nxv4f16, MVT::nxv4i32, SVE_FCVT_COST},
3522 {ISD::UINT_TO_FP, MVT::nxv4f16, MVT::nxv4i8,
3523 SVE_EXT_COST + SVE_FCVT_COST},
3524 {ISD::UINT_TO_FP, MVT::nxv4f16, MVT::nxv4i16, SVE_FCVT_COST},
3525 {ISD::UINT_TO_FP, MVT::nxv4f16, MVT::nxv4i32, SVE_FCVT_COST},
3526
3527 // SVE: to nxv8f16
3528 {ISD::SINT_TO_FP, MVT::nxv8f16, MVT::nxv8i8,
3529 SVE_EXT_COST + SVE_FCVT_COST},
3530 {ISD::SINT_TO_FP, MVT::nxv8f16, MVT::nxv8i16, SVE_FCVT_COST},
3531 {ISD::UINT_TO_FP, MVT::nxv8f16, MVT::nxv8i8,
3532 SVE_EXT_COST + SVE_FCVT_COST},
3533 {ISD::UINT_TO_FP, MVT::nxv8f16, MVT::nxv8i16, SVE_FCVT_COST},
3534
3535 // SVE: to nxv16f16
3536 {ISD::SINT_TO_FP, MVT::nxv16f16, MVT::nxv16i8,
3537 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3538 {ISD::UINT_TO_FP, MVT::nxv16f16, MVT::nxv16i8,
3539 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3540
3541 // Complex: to v2f32
3542 {ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3},
3543 {ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3},
3544 {ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3},
3545 {ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3},
3546
3547 // SVE: to nxv2f32
3548 {ISD::SINT_TO_FP, MVT::nxv2f32, MVT::nxv2i8,
3549 SVE_EXT_COST + SVE_FCVT_COST},
3550 {ISD::SINT_TO_FP, MVT::nxv2f32, MVT::nxv2i16, SVE_FCVT_COST},
3551 {ISD::SINT_TO_FP, MVT::nxv2f32, MVT::nxv2i32, SVE_FCVT_COST},
3552 {ISD::SINT_TO_FP, MVT::nxv2f32, MVT::nxv2i64, SVE_FCVT_COST},
3553 {ISD::UINT_TO_FP, MVT::nxv2f32, MVT::nxv2i8,
3554 SVE_EXT_COST + SVE_FCVT_COST},
3555 {ISD::UINT_TO_FP, MVT::nxv2f32, MVT::nxv2i16, SVE_FCVT_COST},
3556 {ISD::UINT_TO_FP, MVT::nxv2f32, MVT::nxv2i32, SVE_FCVT_COST},
3557 {ISD::UINT_TO_FP, MVT::nxv2f32, MVT::nxv2i64, SVE_FCVT_COST},
3558
3559 // Complex: to v4f32
3560 {ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4},
3561 {ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2},
3562 {ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3},
3563 {ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2},
3564
3565 // SVE: to nxv4f32
3566 {ISD::SINT_TO_FP, MVT::nxv4f32, MVT::nxv4i8,
3567 SVE_EXT_COST + SVE_FCVT_COST},
3568 {ISD::SINT_TO_FP, MVT::nxv4f32, MVT::nxv4i16, SVE_FCVT_COST},
3569 {ISD::SINT_TO_FP, MVT::nxv4f32, MVT::nxv4i32, SVE_FCVT_COST},
3570 {ISD::UINT_TO_FP, MVT::nxv4f32, MVT::nxv4i8,
3571 SVE_EXT_COST + SVE_FCVT_COST},
3572 {ISD::UINT_TO_FP, MVT::nxv4f32, MVT::nxv4i16, SVE_FCVT_COST},
3573 {ISD::SINT_TO_FP, MVT::nxv4f32, MVT::nxv4i32, SVE_FCVT_COST},
3574
3575 // Complex: to v8f32
3576 {ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10},
3577 {ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4},
3578 {ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10},
3579 {ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4},
3580
3581 // SVE: to nxv8f32
3582 {ISD::SINT_TO_FP, MVT::nxv8f32, MVT::nxv8i8,
3583 SVE_EXT_COST + SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3584 {ISD::SINT_TO_FP, MVT::nxv8f32, MVT::nxv8i16,
3585 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3586 {ISD::UINT_TO_FP, MVT::nxv8f32, MVT::nxv8i8,
3587 SVE_EXT_COST + SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3588 {ISD::UINT_TO_FP, MVT::nxv8f32, MVT::nxv8i16,
3589 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3590
3591 // SVE: to nxv16f32
3592 {ISD::SINT_TO_FP, MVT::nxv16f32, MVT::nxv16i8,
3593 SVE_UNPACK_TWICE + 4 * SVE_FCVT_COST},
3594 {ISD::UINT_TO_FP, MVT::nxv16f32, MVT::nxv16i8,
3595 SVE_UNPACK_TWICE + 4 * SVE_FCVT_COST},
3596
3597 // Complex: to v16f32
3598 {ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21},
3599 {ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21},
3600
3601 // Complex: to v2f64
3602 {ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4},
3603 {ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4},
3604 {ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2},
3605 {ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4},
3606 {ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4},
3607 {ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2},
3608
3609 // SVE: to nxv2f64
3610 {ISD::SINT_TO_FP, MVT::nxv2f64, MVT::nxv2i8,
3611 SVE_EXT_COST + SVE_FCVT_COST},
3612 {ISD::SINT_TO_FP, MVT::nxv2f64, MVT::nxv2i16, SVE_FCVT_COST},
3613 {ISD::SINT_TO_FP, MVT::nxv2f64, MVT::nxv2i32, SVE_FCVT_COST},
3614 {ISD::SINT_TO_FP, MVT::nxv2f64, MVT::nxv2i64, SVE_FCVT_COST},
3615 {ISD::UINT_TO_FP, MVT::nxv2f64, MVT::nxv2i8,
3616 SVE_EXT_COST + SVE_FCVT_COST},
3617 {ISD::UINT_TO_FP, MVT::nxv2f64, MVT::nxv2i16, SVE_FCVT_COST},
3618 {ISD::UINT_TO_FP, MVT::nxv2f64, MVT::nxv2i32, SVE_FCVT_COST},
3619 {ISD::UINT_TO_FP, MVT::nxv2f64, MVT::nxv2i64, SVE_FCVT_COST},
3620
3621 // Complex: to v4f64
3622 {ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 4},
3623 {ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 4},
3624
3625 // SVE: to nxv4f64
3626 {ISD::SINT_TO_FP, MVT::nxv4f64, MVT::nxv4i8,
3627 SVE_EXT_COST + SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3628 {ISD::SINT_TO_FP, MVT::nxv4f64, MVT::nxv4i16,
3629 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3630 {ISD::SINT_TO_FP, MVT::nxv4f64, MVT::nxv4i32,
3631 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3632 {ISD::UINT_TO_FP, MVT::nxv4f64, MVT::nxv4i8,
3633 SVE_EXT_COST + SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3634 {ISD::UINT_TO_FP, MVT::nxv4f64, MVT::nxv4i16,
3635 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3636 {ISD::UINT_TO_FP, MVT::nxv4f64, MVT::nxv4i32,
3637 SVE_UNPACK_ONCE + 2 * SVE_FCVT_COST},
3638
3639 // SVE: to nxv8f64
3640 {ISD::SINT_TO_FP, MVT::nxv8f64, MVT::nxv8i8,
3641 SVE_EXT_COST + SVE_UNPACK_TWICE + 4 * SVE_FCVT_COST},
3642 {ISD::SINT_TO_FP, MVT::nxv8f64, MVT::nxv8i16,
3643 SVE_UNPACK_TWICE + 4 * SVE_FCVT_COST},
3644 {ISD::UINT_TO_FP, MVT::nxv8f64, MVT::nxv8i8,
3645 SVE_EXT_COST + SVE_UNPACK_TWICE + 4 * SVE_FCVT_COST},
3646 {ISD::UINT_TO_FP, MVT::nxv8f64, MVT::nxv8i16,
3647 SVE_UNPACK_TWICE + 4 * SVE_FCVT_COST},
3648
3649 // LowerVectorFP_TO_INT
3650 {ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1},
3651 {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1},
3652 {ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1},
3653 {ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1},
3654 {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1},
3655 {ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1},
3656
3657 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
3658 {ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2},
3659 {ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1},
3660 {ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1},
3661 {ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2},
3662 {ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1},
3663 {ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1},
3664
3665 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
3666 {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2},
3667 {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2},
3668 {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2},
3669 {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2},
3670
3671 // Complex, from nxv2f32.
3672 {ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1},
3673 {ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1},
3674 {ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1},
3675 {ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1},
3676 {ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1},
3677 {ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1},
3678 {ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1},
3679 {ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1},
3680
3681 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
3682 {ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2},
3683 {ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2},
3684 {ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2},
3685 {ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2},
3686 {ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2},
3687 {ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2},
3688
3689 // Complex, from nxv2f64.
3690 {ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1},
3691 {ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1},
3692 {ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1},
3693 {ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1},
3694 {ISD::FP_TO_SINT, MVT::nxv2i1, MVT::nxv2f64, 1},
3695 {ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1},
3696 {ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1},
3697 {ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1},
3698 {ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1},
3699 {ISD::FP_TO_UINT, MVT::nxv2i1, MVT::nxv2f64, 1},
3700
3701 // Complex, from nxv4f32.
3702 {ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4},
3703 {ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1},
3704 {ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1},
3705 {ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1},
3706 {ISD::FP_TO_SINT, MVT::nxv4i1, MVT::nxv4f32, 1},
3707 {ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4},
3708 {ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1},
3709 {ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1},
3710 {ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1},
3711 {ISD::FP_TO_UINT, MVT::nxv4i1, MVT::nxv4f32, 1},
3712
3713 // Complex, from nxv8f64. Illegal -> illegal conversions not required.
3714 {ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7},
3715 {ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7},
3716 {ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7},
3717 {ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7},
3718
3719 // Complex, from nxv4f64. Illegal -> illegal conversions not required.
3720 {ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3},
3721 {ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3},
3722 {ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3},
3723 {ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3},
3724 {ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3},
3725 {ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3},
3726
3727 // Complex, from nxv8f32. Illegal -> illegal conversions not required.
3728 {ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3},
3729 {ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3},
3730 {ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3},
3731 {ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3},
3732
3733 // Complex, from nxv8f16.
3734 {ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10},
3735 {ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4},
3736 {ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1},
3737 {ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1},
3738 {ISD::FP_TO_SINT, MVT::nxv8i1, MVT::nxv8f16, 1},
3739 {ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10},
3740 {ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4},
3741 {ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1},
3742 {ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1},
3743 {ISD::FP_TO_UINT, MVT::nxv8i1, MVT::nxv8f16, 1},
3744
3745 // Complex, from nxv4f16.
3746 {ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4},
3747 {ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1},
3748 {ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1},
3749 {ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1},
3750 {ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4},
3751 {ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1},
3752 {ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1},
3753 {ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1},
3754
3755 // Complex, from nxv2f16.
3756 {ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1},
3757 {ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1},
3758 {ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1},
3759 {ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1},
3760 {ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1},
3761 {ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1},
3762 {ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1},
3763 {ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1},
3764
3765 // Truncate from nxvmf32 to nxvmf16.
3766 {ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1},
3767 {ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1},
3768 {ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3},
3769
3770 // Truncate from nxvmf32 to nxvmbf16.
3771 {ISD::FP_ROUND, MVT::nxv2bf16, MVT::nxv2f32, 8},
3772 {ISD::FP_ROUND, MVT::nxv4bf16, MVT::nxv4f32, 8},
3773 {ISD::FP_ROUND, MVT::nxv8bf16, MVT::nxv8f32, 17},
3774
3775 // Truncate from nxvmf64 to nxvmf16.
3776 {ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1},
3777 {ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3},
3778 {ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7},
3779
3780 // Truncate from nxvmf64 to nxvmbf16.
3781 {ISD::FP_ROUND, MVT::nxv2bf16, MVT::nxv2f64, 9},
3782 {ISD::FP_ROUND, MVT::nxv4bf16, MVT::nxv4f64, 19},
3783 {ISD::FP_ROUND, MVT::nxv8bf16, MVT::nxv8f64, 39},
3784
3785 // Truncate from nxvmf64 to nxvmf32.
3786 {ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1},
3787 {ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3},
3788 {ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6},
3789
3790 // Extend from nxvmf16 to nxvmf32.
3791 {ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
3792 {ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
3793 {ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
3794
3795 // Extend from nxvmbf16 to nxvmf32.
3796 {ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2bf16, 1}, // lsl
3797 {ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4bf16, 1}, // lsl
3798 {ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8bf16, 4}, // unpck+unpck+lsl+lsl
3799
3800 // Extend from nxvmf16 to nxvmf64.
3801 {ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
3802 {ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
3803 {ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
3804
3805 // Extend from nxvmbf16 to nxvmf64.
3806 {ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2bf16, 2}, // lsl+fcvt
3807 {ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4bf16, 6}, // 2*unpck+2*lsl+2*fcvt
3808 {ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8bf16, 14}, // 6*unpck+4*lsl+4*fcvt
3809
3810 // Extend from nxvmf32 to nxvmf64.
3811 {ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
3812 {ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
3813 {ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
3814
3815 // Bitcasts from float to integer
3816 {ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0},
3817 {ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0},
3818 {ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0},
3819
3820 // Bitcasts from integer to float
3821 {ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0},
3822 {ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0},
3823 {ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0},
3824
3825 // Add cost for extending to illegal -too wide- scalable vectors.
3826 // zero/sign extend are implemented by multiple unpack operations,
3827 // where each operation has a cost of 1.
3828 {ISD::ZERO_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2},
3829 {ISD::ZERO_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6},
3830 {ISD::ZERO_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14},
3831 {ISD::ZERO_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2},
3832 {ISD::ZERO_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6},
3833 {ISD::ZERO_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2},
3834
3835 {ISD::SIGN_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2},
3836 {ISD::SIGN_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6},
3837 {ISD::SIGN_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14},
3838 {ISD::SIGN_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2},
3839 {ISD::SIGN_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6},
3840 {ISD::SIGN_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2},
3841 };
3842
3843 // We have to estimate a cost of fixed length operation upon
3844 // SVE registers(operations) with the number of registers required
3845 // for a fixed type to be represented upon SVE registers.
3846 EVT WiderTy = SrcTy.bitsGT(DstTy) ? SrcTy : DstTy;
3847 if (SrcTy.isFixedLengthVector() && DstTy.isFixedLengthVector() &&
3848 SrcTy.getVectorNumElements() == DstTy.getVectorNumElements() &&
3849 ST->useSVEForFixedLengthVectors(WiderTy)) {
3850 std::pair<InstructionCost, MVT> LT =
3851 getTypeLegalizationCost(WiderTy.getTypeForEVT(Dst->getContext()));
3852 unsigned NumElements =
3853 AArch64::SVEBitsPerBlock / LT.second.getScalarSizeInBits();
3854 return LT.first *
3856 Opcode,
3857 ScalableVectorType::get(Dst->getScalarType(), NumElements),
3858 ScalableVectorType::get(Src->getScalarType(), NumElements), CCH,
3859 CostKind, I);
3860 }
3861
3862 if (const auto *Entry = ConvertCostTableLookup(
3863 ConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
3864 return Entry->Cost;
3865
3866 static const TypeConversionCostTblEntry FP16Tbl[] = {
3867 {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs
3868 {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1},
3869 {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs
3870 {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1},
3871 {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs
3872 {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2},
3873 {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn
3874 {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2},
3875 {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs
3876 {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1},
3877 {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs
3878 {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4},
3879 {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn
3880 {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3},
3881 {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs
3882 {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2},
3883 {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs
3884 {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8},
3885 {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // ushll + ucvtf
3886 {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // sshll + scvtf
3887 {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf
3888 {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf
3889 };
3890
3891 if (ST->hasFullFP16())
3892 if (const auto *Entry = ConvertCostTableLookup(
3893 FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
3894 return Entry->Cost;
3895
3896 // INT_TO_FP of i64->f32 will scalarize, which is required to avoid
3897 // double-rounding issues.
3898 if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
3899 DstTy.getScalarType() == MVT::f32 && SrcTy.getScalarSizeInBits() > 32 &&
3901 return cast<FixedVectorType>(Dst)->getNumElements() *
3902 getCastInstrCost(Opcode, Dst->getScalarType(),
3903 Src->getScalarType(), CCH, CostKind) +
3905 true, CostKind) +
3907 false, CostKind);
3908
3909 if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
3911 ST->isSVEorStreamingSVEAvailable() &&
3912 TLI->getTypeAction(Src->getContext(), SrcTy) ==
3914 TLI->getTypeAction(Dst->getContext(), DstTy) ==
3916 // The standard behaviour in the backend for these cases is to split the
3917 // extend up into two parts:
3918 // 1. Perform an extending load or masked load up to the legal type.
3919 // 2. Extend the loaded data to the final type.
3920 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
3921 Type *LegalTy = EVT(SrcLT.second).getTypeForEVT(Src->getContext());
3923 Opcode, LegalTy, Src, CCH, CostKind, I);
3925 Opcode, Dst, LegalTy, TTI::CastContextHint::None, CostKind, I);
3926 return Part1 + Part2;
3927 }
3928
3929 // The BasicTTIImpl version only deals with CCH==TTI::CastContextHint::Normal,
3930 // but we also want to include the TTI::CastContextHint::Masked case too.
3931 if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
3933 ST->isSVEorStreamingSVEAvailable() && TLI->isTypeLegal(DstTy))
3935
3936 return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
3937}
3938
// Cost of extracting element `Index` from `VecTy` and immediately sign- or
// zero-extending the result to scalar integer type `Dst`. On AArch64 the
// extension can often be folded into the lane move (smov performs a
// sign-extend; umov zeroes the upper bits), in which case only the extract
// itself is charged.
// NOTE(review): the first signature lines (original lines 3939-3940/3942) are
// missing from this extraction; the visible parameters are VecTy and Index.
 3941 VectorType *VecTy, unsigned Index,
 3943
 3944 // Make sure we were given a valid extend opcode.
 3945 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
 3946 "Invalid opcode");
 3947
 3948 // We are extending an element we extract from a vector, so the source type
 3949 // of the extend is the element type of the vector.
 3950 auto *Src = VecTy->getElementType();
 3951
 3952 // Sign- and zero-extends are for integer types only.
 3953 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
 3954
 3955 // Get the cost for the extract. We compute the cost (if any) for the extend
 3956 // below.
 3957 InstructionCost Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy,
 3958 CostKind, Index, nullptr, nullptr);
 3959
 3960 // Legalize the types.
 3961 auto VecLT = getTypeLegalizationCost(VecTy);
 3962 auto DstVT = TLI->getValueType(DL, Dst);
 3963 auto SrcVT = TLI->getValueType(DL, Src);
 3964
 3965 // If the resulting type is still a vector and the destination type is legal,
 3966 // we may get the extension for free. If not, get the default cost for the
 3967 // extend.
 3968 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
 3969 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
 3970 CostKind);
 3971
 3972 // The destination type should be larger than the element type. If not, get
 3973 // the default cost for the extend.
 3974 if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
 3975 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
 3976 CostKind);
 3977
 3978 switch (Opcode) {
 3979 default:
 3980 llvm_unreachable("Opcode should be either SExt or ZExt");
 3981
 3982 // For sign-extends, we only need a smov, which performs the extension
 3983 // automatically.
 3984 case Instruction::SExt:
 3985 return Cost;
 3986
 3987 // For zero-extends, the extend is performed automatically by a umov unless
 3988 // the destination type is i64 and the element type is i8 or i16.
 3989 case Instruction::ZExt:
 3990 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
 3991 return Cost;
 3992 }
 3993
 3994 // If we are unable to perform the extend for free, get the default cost.
 3995 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
 3996 CostKind);
 3997}
3998
// Cost of a control-flow instruction. For non-throughput cost kinds a PHI is
// free and any other control-flow instruction costs 1; for
// TCK_RecipThroughput (asserted below) branches are assumed predicted and
// therefore free.
// NOTE(review): the condition guarding the first return (original line 4002,
// presumably `if (CostKind != TTI::TCK_RecipThroughput)`) is missing from
// this extraction — confirm against upstream.
 4001 const Instruction *I) const {
 4003 return Opcode == Instruction::PHI ? 0 : 1;
 4004 assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
 4005 // Branches are assumed to be predicted.
 4006 return 0;
 4007}
4008
// Shared implementation behind the three getVectorInstrCost overloads below:
// estimates the cost of a single insertelement/extractelement on `Val`.
// Callers provide at most one of `I` (the concrete instruction) or `Scalar`
// together with `ScalarUserAndIdx` (SLP-style user/lane info); `Index == -1U`
// means the lane is unknown.
 4009InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(
 4010 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
 4011 const Instruction *I, Value *Scalar,
 4012 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
 4013 TTI::VectorInstrContext VIC) const {
 4014 assert(Val->isVectorTy() && "This must be a vector type");
 4015
 4016 if (Index != -1U) {
 4017 // Legalize the type.
 4018 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
 4019
 4020 // This type is legalized to a scalar type.
 4021 if (!LT.second.isVector())
 4022 return 0;
 4023
 4024 // The type may be split. For fixed-width vectors we can normalize the
 4025 // index to the new type.
 4026 if (LT.second.isFixedLengthVector()) {
 4027 unsigned Width = LT.second.getVectorNumElements();
 4028 Index = Index % Width;
 4029 }
 4030
 4031 // The element at index zero is already inside the vector.
 4032 // - For a insert-element or extract-element
 4033 // instruction that extracts integers, an explicit FPR -> GPR move is
 4034 // needed. So it has non-zero cost.
 4035 if (Index == 0 && !Val->getScalarType()->isIntegerTy())
 4036 return 0;
 4037
 4038 // This is recognising a LD1 single-element structure to one lane of one
 4039 // register instruction. I.e., if this is an `insertelement` instruction,
 4040 // and its second operand is a load, then we will generate a LD1, which
 4041 // are expensive instructions on some uArchs.
 4042 if (VIC == TTI::VectorInstrContext::Load) {
 4043 if (ST->hasFastLD1Single())
 4044 return 0;
 4045 return CostKind == TTI::TCK_CodeSize
 4046 ? 0
 // NOTE(review): the non-CodeSize operand of this ternary (original line
 // 4047) is missing from this extraction — confirm against upstream.
 4048 }
 4049
 4050 // i1 inserts and extract will include an extra cset or cmp of the vector
 4051 // value. Increase the cost by 1 to account.
 4052 if (Val->getScalarSizeInBits() == 1)
 4053 return CostKind == TTI::TCK_CodeSize
 4054 ? 2
 4055 : ST->getVectorInsertExtractBaseCost() + 1;
 4056
 4057 // FIXME:
 4058 // If the extract-element and insert-element instructions could be
 4059 // simplified away (e.g., could be combined into users by looking at use-def
 4060 // context), they have no cost. This is not done in the first place for
 4061 // compile-time considerations.
 4062 }
 4063
 4064 // In case of Neon, if there exists extractelement from lane != 0 such that
 4065 // 1. extractelement does not necessitate a move from vector_reg -> GPR.
 4066 // 2. extractelement result feeds into fmul.
 4067 // 3. Other operand of fmul is an extractelement from lane 0 or lane
 4068 // equivalent to 0.
 4069 // then the extractelement can be merged with fmul in the backend and it
 4070 // incurs no cost.
 4071 // e.g.
 4072 // define double @foo(<2 x double> %a) {
 4073 // %1 = extractelement <2 x double> %a, i32 0
 4074 // %2 = extractelement <2 x double> %a, i32 1
 4075 // %res = fmul double %1, %2
 4076 // ret double %res
 4077 // }
 4078 // %2 and %res can be merged in the backend to generate fmul d0, d0, v1.d[1]
 4079 auto ExtractCanFuseWithFmul = [&]() {
 4080 // We bail out if the extract is from lane 0.
 4081 if (Index == 0)
 4082 return false;
 4083
 4084 // Check if the scalar element type of the vector operand of ExtractElement
 4085 // instruction is one of the allowed types.
 4086 auto IsAllowedScalarTy = [&](const Type *T) {
 4087 return T->isFloatTy() || T->isDoubleTy() ||
 4088 (T->isHalfTy() && ST->hasFullFP16());
 4089 };
 4090
 4091 // Check if the extractelement user is scalar fmul.
 4092 auto IsUserFMulScalarTy = [](const Value *EEUser) {
 4093 // Check if the user is scalar fmul.
 4094 const auto *BO = dyn_cast<BinaryOperator>(EEUser);
 4095 return BO && BO->getOpcode() == BinaryOperator::FMul &&
 4096 !BO->getType()->isVectorTy();
 4097 };
 4098
 4099 // Check if the extract index is from lane 0 or lane equivalent to 0 for a
 4100 // certain scalar type and a certain vector register width.
 4101 auto IsExtractLaneEquivalentToZero = [&](unsigned Idx, unsigned EltSz) {
 4102 auto RegWidth =
 // NOTE(review): the initializer call (original line 4103, presumably a
 // getRegisterBitWidth query) is missing from this extraction — confirm.
 4104 .getFixedValue();
 4105 return Idx == 0 || (RegWidth != 0 && (Idx * EltSz) % RegWidth == 0);
 4106 };
 4107
 4108 // Check if the type constraints on input vector type and result scalar type
 4109 // of extractelement instruction are satisfied.
 4110 if (!isa<FixedVectorType>(Val) || !IsAllowedScalarTy(Val->getScalarType()))
 4111 return false;
 4112
 4113 if (Scalar) {
 4114 DenseMap<User *, unsigned> UserToExtractIdx;
 4115 for (auto *U : Scalar->users()) {
 4116 if (!IsUserFMulScalarTy(U))
 4117 return false;
 4118 // Recording entry for the user is important. Index value is not
 4119 // important.
 4120 UserToExtractIdx[U];
 4121 }
 4122 if (UserToExtractIdx.empty())
 4123 return false;
 4124 for (auto &[S, U, L] : ScalarUserAndIdx) {
 4125 for (auto *U : S->users()) {
 4126 if (UserToExtractIdx.contains(U)) {
 4127 auto *FMul = cast<BinaryOperator>(U);
 4128 auto *Op0 = FMul->getOperand(0);
 4129 auto *Op1 = FMul->getOperand(1);
 4130 if ((Op0 == S && Op1 == S) || Op0 != S || Op1 != S) {
 4131 UserToExtractIdx[U] = L;
 4132 break;
 4133 }
 4134 }
 4135 }
 4136 }
 4137 for (auto &[U, L] : UserToExtractIdx) {
 4138 if (!IsExtractLaneEquivalentToZero(Index, Val->getScalarSizeInBits()) &&
 4139 !IsExtractLaneEquivalentToZero(L, Val->getScalarSizeInBits()))
 4140 return false;
 4141 }
 4142 } else {
 4143 const auto *EE = cast<ExtractElementInst>(I);
 4144
 4145 const auto *IdxOp = dyn_cast<ConstantInt>(EE->getIndexOperand());
 4146 if (!IdxOp)
 4147 return false;
 4148
 4149 return !EE->users().empty() && all_of(EE->users(), [&](const User *U) {
 4150 if (!IsUserFMulScalarTy(U))
 4151 return false;
 4152
 4153 // Check if the other operand of extractelement is also extractelement
 4154 // from lane equivalent to 0.
 4155 const auto *BO = cast<BinaryOperator>(U);
 4156 const auto *OtherEE = dyn_cast<ExtractElementInst>(
 4157 BO->getOperand(0) == EE ? BO->getOperand(1) : BO->getOperand(0));
 4158 if (OtherEE) {
 4159 const auto *IdxOp = dyn_cast<ConstantInt>(OtherEE->getIndexOperand());
 4160 if (!IdxOp)
 4161 return false;
 4162 return IsExtractLaneEquivalentToZero(
 4163 cast<ConstantInt>(OtherEE->getIndexOperand())
 4164 ->getValue()
 4165 .getZExtValue(),
 4166 OtherEE->getType()->getScalarSizeInBits());
 4167 }
 4168 return true;
 4169 });
 4170 }
 4171 return true;
 4172 };
 4173
 // An extract that provably fuses with a dependent scalar fmul is free.
 4174 if (Opcode == Instruction::ExtractElement && (I || Scalar) &&
 4175 ExtractCanFuseWithFmul())
 4176 return 0;
 4177
 4178 // All other insert/extracts cost this much.
 4179 return CostKind == TTI::TCK_CodeSize ? 1
 4180 : ST->getVectorInsertExtractBaseCost();
 4181}
4182
// getVectorInstrCost overload taking the two operand values (Op0/Op1).
// Delegates to getVectorInstrCostHelper after one special case handled here.
// NOTE(review): the signature's first line (original line 4183) is missing
// from this extraction.
 4184 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
 4185 const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC) const {
 4186 // Treat insert at lane 0 into a poison vector as having zero cost. This
 4187 // ensures vector broadcasts via an insert + shuffle (and will be lowered to a
 4188 // single dup) are treated as cheap.
 4189 if (Opcode == Instruction::InsertElement && Index == 0 && Op0 &&
 4190 isa<PoisonValue>(Op0))
 4191 return 0;
 4192 return getVectorInstrCostHelper(Opcode, Val, CostKind, Index, nullptr,
 4193 nullptr, {}, VIC);
 4194}
4195
// getVectorInstrCost overload used with SLP-style context: forwards the
// extracted scalar and its user/lane tuples to the shared helper so the
// extract-feeds-fmul fusion check can run.
// NOTE(review): the signature's first line (original line 4196) is missing
// from this extraction.
 4197 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
 4198 Value *Scalar, ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
 4199 TTI::VectorInstrContext VIC) const {
 4200 return getVectorInstrCostHelper(Opcode, Val, CostKind, Index, nullptr, Scalar,
 4201 ScalarUserAndIdx, VIC);
 4202}
4203
// getVectorInstrCost overload taking a concrete Instruction `I`: forwards &I
// to the shared helper, which can then inspect the instruction's operands.
// NOTE(review): the signature's first lines (original lines 4204-4205) are
// missing from this extraction.
 4206 TTI::TargetCostKind CostKind, unsigned Index,
 4207 TTI::VectorInstrContext VIC) const {
 4208 return getVectorInstrCostHelper(I.getOpcode(), Val, CostKind, Index, &I,
 4209 nullptr, {}, VIC);
 4210}
4211
// Cost of a vector element access addressed from the end of the vector.
// Fixed-length vectors delegate to the base implementation; for scalable
// vectors the while+lastb sequence described below is charged one more than
// the base insert/extract cost.
// NOTE(review): the signature lines (original lines 4212-4214) and the body
// of the fixed-vector delegation (original line 4217) are missing from this
// extraction — confirm the callee against upstream.
 4215 unsigned Index) const {
 4216 if (isa<FixedVectorType>(Val))
 4218 Index);
 4219
 4220 // This typically requires both while and lastb instructions in order
 4221 // to extract the last element. If this is in a loop the while
 4222 // instruction can at least be hoisted out, although it will consume a
 4223 // predicate register. The cost should be more expensive than the base
 4224 // extract cost, which is 2 for most CPUs.
 4225 return CostKind == TTI::TCK_CodeSize
 4226 ? 2
 4227 : ST->getVectorInsertExtractBaseCost() + 1;
4229
// Cost of inserting/extracting the demanded elements when scalarizing a
// vector operation: floating-point element types fall back to the base
// implementation, while integer elements are charged one insert/extract base
// cost per demanded element per direction.
// NOTE(review): the signature's first line (original line 4230) and the
// statement(s) at original lines 4234-4235 are missing from this extraction —
// confirm against upstream before relying on the exact guard conditions.
 4231 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
 4232 TTI::TargetCostKind CostKind, bool ForPoisonSrc, ArrayRef<Value *> VL,
 4233 TTI::VectorInstrContext VIC) const {
 4236 if (Ty->getElementType()->isFloatingPointTy())
 4237 return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
 4238 CostKind);
 // Integer elements: (#demanded lanes) x (#directions) x per-op base cost.
 4239 unsigned VecInstCost =
 4240 CostKind == TTI::TCK_CodeSize ? 1 : ST->getVectorInsertExtractBaseCost();
 4241 return DemandedElts.popcount() * (Insert + Extract) * VecInstCost;
 4242}
4243
// Computes the extra cost of promoting an fp16/bf16 operation to f32 when the
// target lacks native support. Returns std::nullopt when no promotion is
// needed (non-half/bfloat element type, native fp16 via +fullfp16, or — when
// the caller allows it — native scalable bf16 via +sve-b16b16). Otherwise the
// cost is: extend operand(s) to f32, perform the op at f32 (priced by the
// caller-supplied InstCost callback), and optionally truncate back.
 4244std::optional<InstructionCost> AArch64TTIImpl::getFP16BF16PromoteCost(
 // NOTE(review): the parameter line for Ty/CostKind/Op1Info (original line
 // 4245) is missing from this extraction.
 4246 TTI::OperandValueInfo Op2Info, bool IncludeTrunc, bool CanUseSVE,
 4247 std::function<InstructionCost(Type *)> InstCost) const {
 4248 if (!Ty->getScalarType()->isHalfTy() && !Ty->getScalarType()->isBFloatTy())
 4249 return std::nullopt;
 4250 if (Ty->getScalarType()->isHalfTy() && ST->hasFullFP16())
 4251 return std::nullopt;
 4252 if (CanUseSVE && Ty->isScalableTy() && ST->hasSVEB16B16() &&
 4253 ST->isNonStreamingSVEorSME2Available())
 4254 return std::nullopt;
 4255
 4256 Type *PromotedTy = Ty->getWithNewType(Type::getFloatTy(Ty->getContext()));
 4257 InstructionCost Cost = getCastInstrCost(Instruction::FPExt, PromotedTy, Ty,
 // NOTE(review): the FPExt call's trailing arguments (original line 4258) are
 // missing from this extraction.
 // Two non-constant operands each need their own extend, so double the cost.
 4259 if (!Op1Info.isConstant() && !Op2Info.isConstant())
 4260 Cost *= 2;
 4261 Cost += InstCost(PromotedTy);
 4262 if (IncludeTrunc)
 4263 Cost += getCastInstrCost(Instruction::FPTrunc, Ty, PromotedTy,
 // NOTE(review): the FPTrunc call's trailing arguments (original line 4264)
 // are missing from this extraction.
 4265 return Cost;
 4266}
4267
4269 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
4271 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
// AArch64-specific cost model for arithmetic instructions. NOTE(review):
// the opening signature line is not visible in this rendering — presumably
// AArch64TTIImpl::getArithmeticInstrCost.
4272
4273 // The code-generator is currently not able to handle scalable vectors
4274 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
4275 // it. This change will be removed when code-generation for these types is
4276 // sufficiently reliable.
4277 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
4278 if (VTy->getElementCount() == ElementCount::getScalable(1))
4280
4281 // TODO: Handle more cost kinds.
4283 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
4284 Op2Info, Args, CxtI);
4285
4286 // Legalize the type.
4287 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
4288 int ISD = TLI->InstructionOpcodeToISD(Opcode);
4289
4290 // Increase the cost for half and bfloat types if not architecturally
4291 // supported.
4292 if (ISD == ISD::FADD || ISD == ISD::FSUB || ISD == ISD::FMUL ||
4293 ISD == ISD::FDIV || ISD == ISD::FREM)
4294 if (auto PromotedCost = getFP16BF16PromoteCost(
4295 Ty, CostKind, Op1Info, Op2Info, /*IncludeTrunc=*/true,
4296 // There is not native support for fdiv/frem even with +sve-b16b16.
4297 /*CanUseSVE=*/ISD != ISD::FDIV && ISD != ISD::FREM,
4298 [&](Type *PromotedTy) {
4299 return getArithmeticInstrCost(Opcode, PromotedTy, CostKind,
4300 Op1Info, Op2Info);
4301 }))
4302 return *PromotedCost;
4303
4304 // If the operation is a widening instruction (smull or umull) and both
4305 // operands are extends the cost can be cheaper by considering that the
4306 // operation will operate on the narrowest type size possible (double the
4307 // largest input size) and a further extend.
4308 if (Type *ExtTy = isBinExtWideningInstruction(Opcode, Ty, Args)) {
4309 if (ExtTy != Ty)
4310 return getArithmeticInstrCost(Opcode, ExtTy, CostKind) +
4311 getCastInstrCost(Instruction::ZExt, Ty, ExtTy,
4313 return LT.first;
4314 }
4315
4316 switch (ISD) {
4317 default:
4318 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
4319 Op2Info);
4320 case ISD::SREM:
4321 case ISD::SDIV:
4322 /*
4323 Notes for sdiv/srem specific costs:
4324 1. This only considers the cases where the divisor is constant, uniform and
4325 (pow-of-2/non-pow-of-2). Other cases are not important since they either
4326 result in some form of (ldr + adrp), corresponding to constant vectors, or
4327 scalarization of the division operation.
4328 2. Constant divisors, either negative in whole or partially, don't result in
4329 significantly different codegen as compared to positive constant divisors.
4330 So, we don't consider negative divisors separately.
4331 3. If the codegen is significantly different with SVE, it has been indicated
4332 using comments at appropriate places.
4333
4334 sdiv specific cases:
4335 -----------------------------------------------------------------------
4336 codegen | pow-of-2 | Type
4337 -----------------------------------------------------------------------
4338 add + cmp + csel + asr | Y | i64
4339 add + cmp + csel + asr | Y | i32
4340 -----------------------------------------------------------------------
4341
4342 srem specific cases:
4343 -----------------------------------------------------------------------
4344 codegen | pow-of-2 | Type
4345 -----------------------------------------------------------------------
4346 negs + and + and + csneg | Y | i64
4347 negs + and + and + csneg | Y | i32
4348 -----------------------------------------------------------------------
4349
4350 other sdiv/srem cases:
4351 -------------------------------------------------------------------------
4352 common codegen | + srem | + sdiv | pow-of-2 | Type
4353 -------------------------------------------------------------------------
4354 smulh + asr + add + add | - | - | N | i64
4355 smull + lsr + add + add | - | - | N | i32
4356 usra | and + sub | sshr | Y | <2 x i64>
4357 2 * (scalar code) | - | - | N | <2 x i64>
4358 usra | bic + sub | sshr + neg | Y | <4 x i32>
4359 smull2 + smull + uzp2 | mls | - | N | <4 x i32>
4360 + sshr + usra | | | |
4361 -------------------------------------------------------------------------
4362 */
4363 if (Op2Info.isConstant() && Op2Info.isUniform()) {
// Component costs used to assemble the expansion sequences tabulated above.
4364 InstructionCost AddCost =
4365 getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
4366 Op1Info.getNoProps(), Op2Info.getNoProps());
4367 InstructionCost AsrCost =
4368 getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
4369 Op1Info.getNoProps(), Op2Info.getNoProps());
4370 InstructionCost MulCost =
4371 getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
4372 Op1Info.getNoProps(), Op2Info.getNoProps());
4373 // add/cmp/csel/csneg should have similar cost while asr/negs/and should
4374 // have similar cost.
4375 auto VT = TLI->getValueType(DL, Ty);
4376 if (VT.isScalarInteger() && VT.getSizeInBits() <= 64) {
4377 if (Op2Info.isPowerOf2() || Op2Info.isNegatedPowerOf2()) {
4378 // Neg can be folded into the asr instruction.
4379 return ISD == ISD::SDIV ? (3 * AddCost + AsrCost)
4380 : (3 * AsrCost + AddCost);
4381 } else {
4382 return MulCost + AsrCost + 2 * AddCost;
4383 }
4384 } else if (VT.isVector()) {
4385 InstructionCost UsraCost = 2 * AsrCost;
4386 if (Op2Info.isPowerOf2() || Op2Info.isNegatedPowerOf2()) {
4387 // Division with scalable types corresponds to native 'asrd'
4388 // instruction when SVE is available.
4389 // e.g. %1 = sdiv <vscale x 4 x i32> %a, splat (i32 8)
4390
4391 // One more for the negation in SDIV
4393 (Op2Info.isNegatedPowerOf2() && ISD == ISD::SDIV) ? AsrCost : 0;
4394 if (Ty->isScalableTy() && ST->hasSVE())
4395 Cost += 2 * AsrCost;
4396 else {
4397 Cost +=
4398 UsraCost +
4399 (ISD == ISD::SDIV
4400 ? (LT.second.getScalarType() == MVT::i64 ? 1 : 2) * AsrCost
4401 : 2 * AddCost);
4402 }
4403 return Cost;
4404 } else if (LT.second == MVT::v2i64) {
// Non-pow-2 v2i64 divides are scalarized: cost each element separately.
4405 return VT.getVectorNumElements() *
4406 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind,
4407 Op1Info.getNoProps(),
4408 Op2Info.getNoProps());
4409 } else {
4410 // When SVE is available, we get:
4411 // smulh + lsr + add/sub + asr + add/sub.
4412 if (Ty->isScalableTy() && ST->hasSVE())
4413 return MulCost /*smulh cost*/ + 2 * AddCost + 2 * AsrCost;
4414 return 2 * MulCost + AddCost /*uzp2 cost*/ + AsrCost + UsraCost;
4415 }
4416 }
4417 }
4418 if (Op2Info.isConstant() && !Op2Info.isUniform() &&
4419 LT.second.isFixedLengthVector()) {
4420 // FIXME: When the constant vector is non-uniform, this may result in
4421 // loading the vector from constant pool or in some cases, may also result
4422 // in scalarization. For now, we are approximating this with the
4423 // scalarization cost.
4424 auto ExtractCost = 2 * getVectorInstrCost(Instruction::ExtractElement, Ty,
4425 CostKind, -1, nullptr, nullptr);
4426 auto InsertCost = getVectorInstrCost(Instruction::InsertElement, Ty,
4427 CostKind, -1, nullptr, nullptr);
4428 unsigned NElts = cast<FixedVectorType>(Ty)->getNumElements();
4429 return ExtractCost + InsertCost +
4430 NElts * getArithmeticInstrCost(Opcode, Ty->getScalarType(),
4431 CostKind, Op1Info.getNoProps(),
4432 Op2Info.getNoProps());
4433 }
4434 [[fallthrough]];
4435 case ISD::UDIV:
4436 case ISD::UREM: {
4437 auto VT = TLI->getValueType(DL, Ty);
4438 if (Op2Info.isConstant()) {
4439 // If the operand is a power of 2 we can use the shift or and cost.
4440 if (ISD == ISD::UDIV && Op2Info.isPowerOf2())
4441 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
4442 Op1Info.getNoProps(),
4443 Op2Info.getNoProps());
4444 if (ISD == ISD::UREM && Op2Info.isPowerOf2())
4445 return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
4446 Op1Info.getNoProps(),
4447 Op2Info.getNoProps());
4448
4449 if (ISD == ISD::UDIV || ISD == ISD::UREM) {
4450 // Divides by a constant are expanded to MULHU + SUB + SRL + ADD + SRL.
4451 // The MULHU will be expanded to UMULL for the types not listed below,
4452 // and will become a pair of UMULL+MULL2 for 128bit vectors.
4453 bool HasMULH = VT == MVT::i64 || LT.second == MVT::nxv2i64 ||
4454 LT.second == MVT::nxv4i32 || LT.second == MVT::nxv8i16 ||
4455 LT.second == MVT::nxv16i8;
4456 bool Is128bit = LT.second.is128BitVector();
4457
4458 InstructionCost MulCost =
4459 getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
4460 Op1Info.getNoProps(), Op2Info.getNoProps());
4461 InstructionCost AddCost =
4462 getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
4463 Op1Info.getNoProps(), Op2Info.getNoProps());
4464 InstructionCost ShrCost =
4465 getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
4466 Op1Info.getNoProps(), Op2Info.getNoProps());
4467 InstructionCost DivCost = MulCost * (Is128bit ? 2 : 1) + // UMULL/UMULH
4468 (HasMULH ? 0 : ShrCost) + // UMULL shift
4469 AddCost * 2 + ShrCost;
// UREM additionally needs a multiply and subtract to recover the remainder.
4470 return DivCost + (ISD == ISD::UREM ? MulCost + AddCost : 0);
4471 }
4472 }
4473
4474 // div i128's are lowered as libcalls. Pass nullptr as (u)divti3 calls are
4475 // emitted by the backend even when those functions are not declared in the
4476 // module.
4477 if (!VT.isVector() && VT.getSizeInBits() > 64)
4478 return getCallInstrCost(/*Function*/ nullptr, Ty, {Ty, Ty}, CostKind);
4479
4481 Opcode, Ty, CostKind, Op1Info, Op2Info);
4482 if (Ty->isVectorTy() && (ISD == ISD::SDIV || ISD == ISD::UDIV)) {
4483 if (TLI->isOperationLegalOrCustom(ISD, LT.second) && ST->hasSVE()) {
4484 // SDIV/UDIV operations are lowered using SVE, then we can have less
4485 // costs.
4486 if (VT.isSimple() && isa<FixedVectorType>(Ty) &&
4487 Ty->getPrimitiveSizeInBits().getFixedValue() < 128) {
4488 static const CostTblEntry DivTbl[]{
4489 {ISD::SDIV, MVT::v2i8, 5}, {ISD::SDIV, MVT::v4i8, 8},
4490 {ISD::SDIV, MVT::v8i8, 8}, {ISD::SDIV, MVT::v2i16, 5},
4491 {ISD::SDIV, MVT::v4i16, 5}, {ISD::SDIV, MVT::v2i32, 1},
4492 {ISD::UDIV, MVT::v2i8, 5}, {ISD::UDIV, MVT::v4i8, 8},
4493 {ISD::UDIV, MVT::v8i8, 8}, {ISD::UDIV, MVT::v2i16, 5},
4494 {ISD::UDIV, MVT::v4i16, 5}, {ISD::UDIV, MVT::v2i32, 1}};
4495
4496 const auto *Entry = CostTableLookup(DivTbl, ISD, VT.getSimpleVT());
4497 if (nullptr != Entry)
4498 return Entry->Cost;
4499 }
4500 // For 8/16-bit elements, the cost is higher because the type
4501 // requires promotion and possibly splitting:
4502 if (LT.second.getScalarType() == MVT::i8)
4503 Cost *= 8;
4504 else if (LT.second.getScalarType() == MVT::i16)
4505 Cost *= 4;
4506 return Cost;
4507 } else {
4508 // If one of the operands is a uniform constant then the cost for each
4509 // element is Cost for insertion, extraction and division.
4510 // Insertion cost = 2, Extraction Cost = 2, Division = cost for the
4511 // operation with scalar type
4512 if ((Op1Info.isConstant() && Op1Info.isUniform()) ||
4513 (Op2Info.isConstant() && Op2Info.isUniform())) {
4514 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
4516 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info);
4517 return (4 + DivCost) * VTy->getNumElements();
4518 }
4519 }
4520 // On AArch64, without SVE, vector divisions are expanded
4521 // into scalar divisions of each pair of elements.
4522 Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, CostKind,
4523 -1, nullptr, nullptr);
4524 Cost += getVectorInstrCost(Instruction::InsertElement, Ty, CostKind, -1,
4525 nullptr, nullptr);
4526 }
4527
4528 // TODO: if one of the arguments is scalar, then it's not necessary to
4529 // double the cost of handling the vector elements.
4530 Cost += Cost;
4531 }
4532 return Cost;
4533 }
4534 case ISD::MUL:
4535 // When SVE is available, then we can lower the v2i64 operation using
4536 // the SVE mul instruction, which has a lower cost.
4537 if (LT.second == MVT::v2i64 && ST->hasSVE())
4538 return LT.first;
4539
4540 // When SVE is not available, there is no MUL.2d instruction,
4541 // which means mul <2 x i64> is expensive as elements are extracted
4542 // from the vectors and the muls scalarized.
4543 // As getScalarizationOverhead is a bit too pessimistic, we
4544 // estimate the cost for a i64 vector directly here, which is:
4545 // - four 2-cost i64 extracts,
4546 // - two 2-cost i64 inserts, and
4547 // - two 1-cost muls.
4548 // So, for a v2i64 with LT.First = 1 the cost is 14, and for a v4i64 with
4549 // LT.first = 2 the cost is 28.
4550 if (LT.second != MVT::v2i64)
4551 return LT.first;
4552 return cast<VectorType>(Ty)->getElementCount().getKnownMinValue() *
4553 (getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind) +
4554 getVectorInstrCost(Instruction::ExtractElement, Ty, CostKind, -1,
4555 nullptr, nullptr) *
4556 2 +
4557 getVectorInstrCost(Instruction::InsertElement, Ty, CostKind, -1,
4558 nullptr, nullptr));
4559 case ISD::ADD:
4560 case ISD::XOR:
4561 case ISD::OR:
4562 case ISD::AND:
4563 case ISD::SRL:
4564 case ISD::SRA:
4565 case ISD::SHL:
4566 // These nodes are marked as 'custom' for combining purposes only.
4567 // We know that they are legal. See LowerAdd in ISelLowering.
4568 return LT.first;
4569
4570 case ISD::FNEG:
4571 // Scalar fmul(fneg) or fneg(fmul) can be converted to fnmul
4572 if ((Ty->isFloatTy() || Ty->isDoubleTy() ||
4573 (Ty->isHalfTy() && ST->hasFullFP16())) &&
4574 CxtI &&
4575 ((CxtI->hasOneUse() &&
4576 match(*CxtI->user_begin(), m_FMul(m_Value(), m_Value()))) ||
4577 match(CxtI->getOperand(0), m_FMul(m_Value(), m_Value()))))
4578 return 0;
4579 [[fallthrough]];
4580 case ISD::FADD:
4581 case ISD::FSUB:
4582 if (!Ty->getScalarType()->isFP128Ty())
4583 return LT.first;
4584 [[fallthrough]];
4585 case ISD::FMUL:
4586 case ISD::FDIV:
4587 // These nodes are marked as 'custom' just to lower them to SVE.
4588 // We know said lowering will incur no additional cost.
4589 if (!Ty->getScalarType()->isFP128Ty())
4590 return 2 * LT.first;
4591
4592 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
4593 Op2Info);
4594 case ISD::FREM:
4595 // Pass nullptr as fmod/fmodf calls are emitted by the backend even when
4596 // those functions are not declared in the module.
4597 if (!Ty->isVectorTy())
4598 return getCallInstrCost(/*Function*/ nullptr, Ty, {Ty, Ty}, CostKind);
4599 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
4600 Op2Info);
4601 }
4602}
4603
4606 const SCEV *Ptr,
4608 // Address computations in vectorized code with non-consecutive addresses will
4609 // likely result in more instructions compared to scalar code where the
4610 // computation can more often be merged into the index mode. The resulting
4611 // extra micro-ops can significantly decrease throughput.
// NOTE(review): the opening signature line is not visible in this rendering —
// presumably AArch64TTIImpl::getAddressComputationCost.
4612 unsigned NumVectorInstToHideOverhead = NeonNonConstStrideOverhead;
4613 int MaxMergeDistance = 64;
4614
// Penalize vector pointers whose stride is unknown or too large for the
// addressing mode to absorb.
4615 if (PtrTy->isVectorTy() && SE &&
4616 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
4617 return NumVectorInstToHideOverhead;
4618
4619 // In many cases the address computation is not merged into the instruction
4620 // addressing mode.
4621 return 1;
4622}
4623
4624/// Check whether Opcode1 has less throughput according to the scheduling
4625/// model than Opcode2.
4627 unsigned Opcode1, unsigned Opcode2) const {
4628 const MCSchedModel &Sched = ST->getSchedModel();
4629 const TargetInstrInfo *TII = ST->getInstrInfo();
// Without a per-instruction scheduling model we cannot compare throughputs;
// conservatively report "not known lower".
4630 if (!Sched.hasInstrSchedModel())
4631 return false;
4632
4633 const MCSchedClassDesc *SCD1 =
4634 Sched.getSchedClassDesc(TII->get(Opcode1).getSchedClass());
4635 const MCSchedClassDesc *SCD2 =
4636 Sched.getSchedClassDesc(TII->get(Opcode2).getSchedClass());
4637 // We cannot handle variant scheduling classes without an MI. If we need to
4638 // support them for any of the instructions we query the information of we
4639 // might need to add a way to resolve them without a MI or not use the
4640 // scheduling info.
4641 assert(!SCD1->isVariant() && !SCD2->isVariant() &&
4642 "Cannot handle variant scheduling classes without an MI");
4643 if (!SCD1->isValid() || !SCD2->isValid())
4644 return false;
4645
// Higher reciprocal throughput means fewer instructions can issue per cycle,
// i.e. lower throughput for Opcode1.
4646 return MCSchedModel::getReciprocalThroughput(*ST, *SCD1) >
4648}
4649
4650 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
4653 TTI::OperandValueInfo Op2Info, const Instruction *I) const {
// AArch64-specific cost model for compare and select instructions.
// NOTE(review): the opening signature line is not visible in this rendering —
// presumably AArch64TTIImpl::getCmpSelInstrCost.
4654 // We don't lower some vector selects well that are wider than the register
4655 // width. TODO: Improve this with different cost kinds.
4656 if (isa<FixedVectorType>(ValTy) && Opcode == Instruction::Select) {
4657 // We would need this many instructions to hide the scalarization happening.
4658 const int AmortizationCost = 20;
4659
4660 // If VecPred is not set, check if we can get a predicate from the context
4661 // instruction, if its type matches the requested ValTy.
4662 if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
4663 CmpPredicate CurrentPred;
4664 if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
4665 m_Value())))
4666 VecPred = CurrentPred;
4667 }
4668 // Check if we have a compare/select chain that can be lowered using
4669 // a (F)CMxx & BFI pair.
4670 if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE ||
4671 VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT ||
4672 VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ ||
4673 VecPred == CmpInst::FCMP_UNE) {
4674 static const auto ValidMinMaxTys = {
4675 MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
4676 MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64};
4677 static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16};
4678
4679 auto LT = getTypeLegalizationCost(ValTy);
4680 if (any_of(ValidMinMaxTys, equal_to(LT.second)) ||
4681 (ST->hasFullFP16() &&
4682 any_of(ValidFP16MinMaxTys, equal_to(LT.second))))
4683 return LT.first;
4684 }
4685
4686 static const TypeConversionCostTblEntry VectorSelectTbl[] = {
4687 {Instruction::Select, MVT::v2i1, MVT::v2f32, 2},
4688 {Instruction::Select, MVT::v2i1, MVT::v2f64, 2},
4689 {Instruction::Select, MVT::v4i1, MVT::v4f32, 2},
4690 {Instruction::Select, MVT::v4i1, MVT::v4f16, 2},
4691 {Instruction::Select, MVT::v8i1, MVT::v8f16, 2},
4692 {Instruction::Select, MVT::v16i1, MVT::v16i16, 16},
4693 {Instruction::Select, MVT::v8i1, MVT::v8i32, 8},
4694 {Instruction::Select, MVT::v16i1, MVT::v16i32, 16},
4695 {Instruction::Select, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost},
4696 {Instruction::Select, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost},
4697 {Instruction::Select, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost}};
4698
4699 EVT SelCondTy = TLI->getValueType(DL, CondTy);
4700 EVT SelValTy = TLI->getValueType(DL, ValTy);
4701 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
4702 if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, Opcode,
4703 SelCondTy.getSimpleVT(),
4704 SelValTy.getSimpleVT()))
4705 return Entry->Cost;
4706 }
4707 }
4708
4709 if (Opcode == Instruction::FCmp) {
// f16/bf16 compares without native support are costed via promotion to f32
// (no truncation of the i1 result is needed).
4710 if (auto PromotedCost = getFP16BF16PromoteCost(
4711 ValTy, CostKind, Op1Info, Op2Info, /*IncludeTrunc=*/false,
4712 // TODO: Consider costing SVE FCMPs.
4713 /*CanUseSVE=*/false, [&](Type *PromotedTy) {
4715 getCmpSelInstrCost(Opcode, PromotedTy, CondTy, VecPred,
4716 CostKind, Op1Info, Op2Info);
4717 if (isa<VectorType>(PromotedTy))
4719 Instruction::Trunc,
4723 return Cost;
4724 }))
4725 return *PromotedCost;
4726
4727 auto LT = getTypeLegalizationCost(ValTy);
4728 // Model unknown fp compares as a libcall.
4729 if (LT.second.getScalarType() != MVT::f64 &&
4730 LT.second.getScalarType() != MVT::f32 &&
4731 LT.second.getScalarType() != MVT::f16)
4732 return LT.first * getCallInstrCost(/*Function*/ nullptr, ValTy,
4733 {ValTy, ValTy}, CostKind);
4734
4735 // Some comparison operators require expanding to multiple compares + or.
4736 unsigned Factor = 1;
4737 if (!CondTy->isVectorTy() &&
4738 (VecPred == FCmpInst::FCMP_ONE || VecPred == FCmpInst::FCMP_UEQ))
4739 Factor = 2; // fcmp with 2 selects
4740 else if (isa<FixedVectorType>(ValTy) &&
4741 (VecPred == FCmpInst::FCMP_ONE || VecPred == FCmpInst::FCMP_UEQ ||
4742 VecPred == FCmpInst::FCMP_ORD || VecPred == FCmpInst::FCMP_UNO))
4743 Factor = 3; // fcmxx+fcmyy+or
4744 else if (isa<ScalableVectorType>(ValTy) &&
4745 (VecPred == FCmpInst::FCMP_ONE || VecPred == FCmpInst::FCMP_UEQ))
4746 Factor = 3; // fcmxx+fcmyy+or
4747
// If the scheduling model says SVE predicate compares have lower throughput
// than the NEON equivalent, reflect that in the cost.
4748 if (isa<ScalableVectorType>(ValTy) &&
4750 hasKnownLowerThroughputFromSchedulingModel(AArch64::FCMEQ_PPzZZ_S,
4751 AArch64::FCMEQv4f32))
4752 Factor *= 2;
4753
4754 return Factor * (CostKind == TTI::TCK_Latency ? 2 : LT.first);
4755 }
4756
4757 // Treat the icmp in icmp(and, 0) or icmp(and, -1/1) when it can be folded to
4758 // icmp(and, 0) as free, as we can make use of ands, but only if the
4759 // comparison is not unsigned. FIXME: Enable for non-throughput cost kinds
4760 // providing it will not cause performance regressions.
4761 if (CostKind == TTI::TCK_RecipThroughput && ValTy->isIntegerTy() &&
4762 Opcode == Instruction::ICmp && I && !CmpInst::isUnsigned(VecPred) &&
4763 TLI->isTypeLegal(TLI->getValueType(DL, ValTy)) &&
4764 match(I->getOperand(0), m_And(m_Value(), m_Value()))) {
4765 if (match(I->getOperand(1), m_Zero()))
4766 return 0;
4767
4768 // x >= 1 / x < 1 -> x > 0 / x <= 0
4769 if (match(I->getOperand(1), m_One()) &&
4770 (VecPred == CmpInst::ICMP_SLT || VecPred == CmpInst::ICMP_SGE))
4771 return 0;
4772
4773 // x <= -1 / x > -1 -> x > 0 / x <= 0
4774 if (match(I->getOperand(1), m_AllOnes()) &&
4775 (VecPred == CmpInst::ICMP_SLE || VecPred == CmpInst::ICMP_SGT))
4776 return 0;
4777 }
4778
4779 // The base case handles scalable vectors fine for now, since it treats the
4780 // cost as 1 * legalization cost.
4781 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
4782 Op1Info, Op2Info, I);
4783}
4784
// Configure inline memcmp expansion: how many loads may be emitted and of
// what sizes, instead of calling the libc memcmp.
4786AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4788 if (ST->requiresStrictAlign()) {
4789 // TODO: Add cost modeling for strict align. Misaligned loads expand to
4790 // a bunch of instructions when strict align is enabled.
4791 return Options;
4792 }
4793 Options.AllowOverlappingLoads = true;
4794 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4795 Options.NumLoadsPerBlock = Options.MaxNumLoads;
4796 // TODO: Though vector loads usually perform well on AArch64, in some targets
4797 // they may wake up the FP unit, which raises the power consumption. Perhaps
4798 // they could be used with no holds barred (-O3).
4799 Options.LoadSizes = {8, 4, 2, 1};
4800 Options.AllowedTailExpansions = {3, 5, 6};
4801 return Options;
4802}
4803
// NOTE(review): the signature line for this predicate is not visible in this
// rendering; it simply reports whether the subtarget has SVE — presumably
// AArch64TTIImpl::prefersVectorizedAddressing; confirm against the header.
4805 return ST->hasSVE();
4806}
4807
// Dispatch masked-memory intrinsics to the appropriate specialized cost hook.
// NOTE(review): the opening signature line is not visible in this rendering.
4811 switch (MICA.getID()) {
4812 case Intrinsic::masked_scatter:
4813 case Intrinsic::masked_gather:
4814 return getGatherScatterOpCost(MICA, CostKind);
4815 case Intrinsic::masked_load:
4816 case Intrinsic::masked_store:
4817 return getMaskedMemoryOpCost(MICA, CostKind);
4818 }
4820}
4821
// Cost of a masked load/store. NOTE(review): the opening signature line is
// not visible in this rendering — presumably
// AArch64TTIImpl::getMaskedMemoryOpCost.
4825 Type *Src = MICA.getDataType();
4826
4827 if (useNeonVector(Src))
4829 auto LT = getTypeLegalizationCost(Src);
4830 if (!LT.first.isValid())
4832
4833 // Return an invalid cost for element types that we are unable to lower.
4834 auto *VT = cast<VectorType>(Src);
4835 if (VT->getElementType()->isIntegerTy(1))
4837
4838 // The code-generator is currently not able to handle scalable vectors
4839 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
4840 // it. This change will be removed when code-generation for these types is
4841 // sufficiently reliable.
4842 if (VT->getElementCount() == ElementCount::getScalable(1))
4844
4845 return LT.first;
4846}
4847
4848// This function returns gather/scatter overhead either from
4849// user-provided value or specialized values per-target from \p ST.
4850static unsigned getSVEGatherScatterOverhead(unsigned Opcode,
4851 const AArch64Subtarget *ST) {
4852 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
4853 "Should be called on only load or stores.");
4854 switch (Opcode) {
4855 case Instruction::Load:
4856 if (SVEGatherOverhead.getNumOccurrences() > 0)
4857 return SVEGatherOverhead;
4858 return ST->getGatherOverhead();
4859 break;
4860 case Instruction::Store:
4861 if (SVEScatterOverhead.getNumOccurrences() > 0)
4862 return SVEScatterOverhead;
4863 return ST->getScatterOverhead();
4864 break;
4865 default:
4866 llvm_unreachable("Shouldn't have reached here");
4867 }
4868}
4869
4873
4874 unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
4875 MICA.getID() == Intrinsic::vp_gather)
4876 ? Instruction::Load
4877 : Instruction::Store;
4878
4879 Type *DataTy = MICA.getDataType();
4880 Align Alignment = MICA.getAlignment();
4881 const Instruction *I = MICA.getInst();
4882
4883 if (useNeonVector(DataTy) || !isLegalMaskedGatherScatter(DataTy))
4885 auto *VT = cast<VectorType>(DataTy);
4886 auto LT = getTypeLegalizationCost(DataTy);
4887 if (!LT.first.isValid())
4889
4890 // Return an invalid cost for element types that we are unable to lower.
4891 if (!LT.second.isVector() ||
4892 !isElementTypeLegalForScalableVector(VT->getElementType()) ||
4893 VT->getElementType()->isIntegerTy(1))
4895
4896 // The code-generator is currently not able to handle scalable vectors
4897 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
4898 // it. This change will be removed when code-generation for these types is
4899 // sufficiently reliable.
4900 if (VT->getElementCount() == ElementCount::getScalable(1))
4902
4903 ElementCount LegalVF = LT.second.getVectorElementCount();
4904 InstructionCost MemOpCost =
4905 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind,
4906 {TTI::OK_AnyValue, TTI::OP_None}, I);
4907 // Add on an overhead cost for using gathers/scatters.
4908 MemOpCost *= getSVEGatherScatterOverhead(Opcode, ST);
4909 return LT.first * MemOpCost * getMaxNumElements(LegalVF);
4910}
4911
// True when a fixed-length vector will be handled with NEON rather than SVE
// (i.e. the subtarget does not use SVE for fixed-length vectors).
4913 return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
4914}
4915
4917 Align Alignment,
4918 unsigned AddressSpace,
4920 TTI::OperandValueInfo OpInfo,
4921 const Instruction *I) const {
// AArch64-specific cost model for plain load/store instructions.
// NOTE(review): the opening signature line is not visible in this rendering —
// presumably AArch64TTIImpl::getMemoryOpCost.
4922 EVT VT = TLI->getValueType(DL, Ty, true);
4923 // Type legalization can't handle structs
4924 if (VT == MVT::Other)
4925 return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
4926 CostKind);
4927
4928 auto LT = getTypeLegalizationCost(Ty);
4929 if (!LT.first.isValid())
4931
4932 // The code-generator is currently not able to handle scalable vectors
4933 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
4934 // it. This change will be removed when code-generation for these types is
4935 // sufficiently reliable.
4936 // We also only support full register predicate loads and stores.
4937 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
4938 if (VTy->getElementCount() == ElementCount::getScalable(1) ||
4939 (VTy->getElementType()->isIntegerTy(1) &&
4940 !VTy->getElementCount().isKnownMultipleOf(
4943
4944 // TODO: consider latency as well for TCK_SizeAndLatency.
4946 return LT.first;
4948
4949 return 1;
4950
4951 if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
4952 LT.second.is128BitVector() && Alignment < Align(16)) {
4953 // Unaligned stores are extremely inefficient. We don't split all
4954 // unaligned 128-bit stores because the negative impact that has shown in
4955 // practice on inlined block copy code.
4956 // We make such stores expensive so that we will only vectorize if there
4957 // are 6 other instructions getting vectorized.
4958 const int AmortizationCost = 6;
4959
4960 return LT.first * 2 * AmortizationCost;
4961 }
4962
4963 // Opaque ptr or ptr vector types are i64s and can be lowered to STP/LDPs.
4964 if (Ty->isPtrOrPtrVectorTy())
4965 return LT.first;
4966
4967 if (useNeonVector(Ty)) {
4968 // Check truncating stores and extending loads.
4969 if (Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
4970 // v4i8 types are lowered to scalar a load/store and sshll/xtn.
4971 if (VT == MVT::v4i8)
4972 return 2;
4973 // Otherwise we need to scalarize.
4974 return cast<FixedVectorType>(Ty)->getNumElements() * 2;
4975 }
4976 EVT EltVT = VT.getVectorElementType();
4977 unsigned EltSize = EltVT.getScalarSizeInBits();
4978 if (!isPowerOf2_32(EltSize) || EltSize < 8 || EltSize > 64 ||
4979 VT.getVectorNumElements() >= (128 / EltSize) || Alignment != Align(1))
4980 return LT.first;
4981 // FIXME: v3i8 lowering currently is very inefficient, due to automatic
4982 // widening to v4i8, which produces suboptimal results.
4983 if (VT.getVectorNumElements() == 3 && EltVT == MVT::i8)
4984 return LT.first;
4985
4986 // Check non-power-of-2 loads/stores for legal vector element types with
4987 // NEON. Non-power-of-2 memory ops will get broken down to a set of
4988 // operations on smaller power-of-2 ops, including ld1/st1.
4989 LLVMContext &C = Ty->getContext();
4991 SmallVector<EVT> TypeWorklist;
4992 TypeWorklist.push_back(VT);
// Recursively split a non-power-of-2 vector into a largest power-of-2 part
// plus a remainder, costing one memory op per power-of-2 piece.
4993 while (!TypeWorklist.empty()) {
4994 EVT CurrVT = TypeWorklist.pop_back_val();
4995 unsigned CurrNumElements = CurrVT.getVectorNumElements();
4996 if (isPowerOf2_32(CurrNumElements)) {
4997 Cost += 1;
4998 continue;
4999 }
5000
5001 unsigned PrevPow2 = NextPowerOf2(CurrNumElements) / 2;
5002 TypeWorklist.push_back(EVT::getVectorVT(C, EltVT, PrevPow2));
5003 TypeWorklist.push_back(
5004 EVT::getVectorVT(C, EltVT, CurrNumElements - PrevPow2));
5005 }
5006 return Cost;
5007 }
5008
5009 return LT.first;
5010}
5011
5013 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
5014 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5015 bool UseMaskForCond, bool UseMaskForGaps) const {
// Cost model for interleaved (ldN/stN) memory accesses. NOTE(review): the
// opening signature line is not visible in this rendering — presumably
// AArch64TTIImpl::getInterleavedMemoryOpCost.
5016 assert(Factor >= 2 && "Invalid interleave factor");
5017 auto *VecVTy = cast<VectorType>(VecTy);
5018
5019 if (VecTy->isScalableTy() && !ST->hasSVE())
5021
5022 // Scalable VFs will emit vector.[de]interleave intrinsics, and currently we
5023 // only have lowering for power-of-2 factors.
5024 // TODO: Add lowering for vector.[de]interleave3 intrinsics and support in
5025 // InterleavedAccessPass for ld3/st3
5026 if (VecTy->isScalableTy() && !isPowerOf2_32(Factor))
5028
5029 // Vectorization for masked interleaved accesses is only enabled for scalable
5030 // VF.
5031 if (!VecTy->isScalableTy() && (UseMaskForCond || UseMaskForGaps))
5033
5034 if (!UseMaskForGaps && Factor <= TLI->getMaxSupportedInterleaveFactor()) {
5035 unsigned MinElts = VecVTy->getElementCount().getKnownMinValue();
5036 auto *SubVecTy =
5037 VectorType::get(VecVTy->getElementType(),
5038 VecVTy->getElementCount().divideCoefficientBy(Factor));
5039
5040 // ldN/stN only support legal vector types of size 64 or 128 in bits.
5041 // Accesses having vector types that are a multiple of 128 bits can be
5042 // matched to more than one ldN/stN instruction.
5043 bool UseScalable;
5044 if (MinElts % Factor == 0 &&
5045 TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
5046 return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
5047 }
5048
5049 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5050 Alignment, AddressSpace, CostKind,
5051 UseMaskForCond, UseMaskForGaps);
5052}
5053
// Sum a store+load (spill/fill) cost for every 128-bit fixed vector type in
// Tys. NOTE(review): the opening signature line is not visible in this
// rendering — presumably AArch64TTIImpl::getCostOfKeepingLiveOverCall;
// confirm against the header.
5058 for (auto *I : Tys) {
5059 if (!I->isVectorTy())
5060 continue;
// Only 128-bit vectors are costed here; smaller/larger types fall through
// with no added cost.
5061 if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
5062 128)
5063 Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
5064 getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
5065 }
5066 return Cost;
5067}
5068
// Forward the unroll/interleave factor preference to the subtarget tuning.
5070 return ST->getMaxInterleaveFactor();
5071}
5072
5073// For Falkor, we want to avoid having too many strided loads in a loop since
5074// that can exhaust the HW prefetcher resources. We adjust the unroller
5075// MaxCount preference below to attempt to ensure unrolling doesn't create too
5076// many strided loads.
5077static void
5080 enum { MaxStridedLoads = 7 };
// Count affine-AddRec (strided) loads in the loop, capping the scan once the
// count is already high enough to pin MaxCount to 1.
5081 auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
5082 int StridedLoads = 0;
5083 // FIXME? We could make this more precise by looking at the CFG and
5084 // e.g. not counting loads in each side of an if-then-else diamond.
5085 for (const auto BB : L->blocks()) {
5086 for (auto &I : *BB) {
5087 LoadInst *LMemI = dyn_cast<LoadInst>(&I);
5088 if (!LMemI)
5089 continue;
5090
5091 Value *PtrValue = LMemI->getPointerOperand();
5092 if (L->isLoopInvariant(PtrValue))
5093 continue;
5094
5095 const SCEV *LSCEV = SE.getSCEV(PtrValue);
5096 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
5097 if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
5098 continue;
5099
5100 // FIXME? We could take pairing of unrolled load copies into account
5101 // by looking at the AddRec, but we would probably have to limit this
5102 // to loops with no stores or other memory optimization barriers.
5103 ++StridedLoads;
5104 // We've seen enough strided loads that seeing more won't make a
5105 // difference.
5106 if (StridedLoads > MaxStridedLoads / 2)
5107 return StridedLoads;
5108 }
5109 }
5110 return StridedLoads;
5111 };
5112
5113 int StridedLoads = countStridedLoads(L, SE);
5114 LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
5115 << " strided loads\n");
5116 // Pick the largest power of 2 unroll count that won't result in too many
5117 // strided loads.
5118 if (StridedLoads) {
5119 UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
5120 LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
5121 << UP.MaxCount << '\n');
5122 }
5123}
5124
 5125// This function returns true if the loop:
 5126// 1. Has a valid cost, and
 5127// 2. Has a cost within the supplied budget.
 5128// Otherwise it returns false.
// If FinalSize is non-null, the computed loop code size is written to it on
// success. NOTE(review): original line 5129 (the isLoopSizeWithinBudget
// signature start, taking the Loop and a TTI implementation) is elided in
// this rendering; verify against the source.
 5130 InstructionCost Budget,
 5131 unsigned *FinalSize) {
 5132 // Estimate the size of the loop.
 5133 InstructionCost LoopCost = 0;
 5134
 5135 for (auto *BB : L->getBlocks()) {
 5136 for (auto &I : *BB) {
 5137 SmallVector<const Value *, 4> Operands(I.operand_values());
 // Cost is measured as code size, matching the "budget" semantics.
 5138 InstructionCost Cost =
 5139 TTI.getInstructionCost(&I, Operands, TTI::TCK_CodeSize);
 5140 // This can happen with intrinsics that don't currently have a cost model
 5141 // or for some operations that require SVE.
 5142 if (!Cost.isValid())
 5143 return false;
 5144
 5145 LoopCost += Cost;
 // Bail out early as soon as the budget is exceeded.
 5146 if (LoopCost > Budget)
 5147 return false;
 5148 }
 5149 }
 5150
 5151 if (FinalSize)
 5152 *FinalSize = LoopCost.getValue();
 5153 return true;
 5154}
5155
// Heuristic: returns true for small search-style loops (code size <= 5,
// exactly two blocks, both terminated by branches) whose trip count is
// unknown or large, which typically benefit from runtime unrolling.
// NOTE(review): original line 5156 (the shouldUnrollMultiExitLoop signature
// start, taking Loop *L and ScalarEvolution &SE) is elided in this
// rendering; verify against the source.
 5157 const AArch64TTIImpl &TTI) {
 5158 // Only consider loops with unknown trip counts for which we can determine
 5159 // a symbolic expression. Multi-exit loops with small known trip counts will
 5160 // likely be unrolled anyway.
 5161 const SCEV *BTC = SE.getSymbolicMaxBackedgeTakenCount(L);
 // NOTE(review): the condition on original line 5162 is elided here --
 // presumably a check that BTC is not a SCEVCouldNotCompute; confirm.
 5163 return false;
 5164
 5165 // It might not be worth unrolling loops with low max trip counts. Restrict
 5166 // this to max trip counts > 32 for now.
 5167 unsigned MaxTC = SE.getSmallConstantMaxTripCount(L);
 5168 if (MaxTC > 0 && MaxTC <= 32)
 5169 return false;
 5170
 5171 // Make sure the loop size is <= 5.
 5172 if (!isLoopSizeWithinBudget(L, TTI, 5, nullptr))
 5173 return false;
 5174
 5175 // Small search loops with multiple exits can be highly beneficial to unroll.
 5176 // We only care about loops with exactly two exiting blocks, although each
 5177 // block could jump to the same exit block.
 5178 ArrayRef<BasicBlock *> Blocks = L->getBlocks();
 5179 if (Blocks.size() != 2)
 5180 return false;
 5181
 5182 if (any_of(Blocks, [](BasicBlock *BB) {
 5183 return !isa<BranchInst>(BB->getTerminator());
 5184 }))
 5185 return false;
 5186
 5187 return true;
 5188}
5189
 5190/// For Apple CPUs, we want to runtime-unroll loops to make better use of the
 5191/// OOO engine's wide instruction window and various predictors.
 5192static void
// NOTE(review): original lines 5193-5194 (the getAppleRuntimeUnrollPreferences
// signature, taking Loop *L, ScalarEvolution &SE and the unrolling-preference
// struct UP) are elided in this rendering; verify against the source.
 5195 const AArch64TTIImpl &TTI) {
 5196 // Limit loops with structure that is highly likely to benefit from runtime
 5197 // unrolling; that is we exclude outer loops and loops with many blocks (i.e.
 5198 // likely with complex control flow). Note that the heuristics here may be
 5199 // overly conservative and we err on the side of avoiding runtime unrolling
 5200 // rather than unroll excessively. They are all subject to further refinement.
 5201 if (!L->isInnermost() || L->getNumBlocks() > 8)
 5202 return;
 5203
 5204 // Loops with multiple exits are handled by common code.
 5205 if (!L->getExitBlock())
 5206 return;
 5207
 5208 // Check if the loop contains any reductions that could be parallelized when
 5209 // unrolling. If so, enable partial unrolling, if the trip count is known to be
 5210 // a multiple of 2.
 5211 bool HasParellelizableReductions =
 5212 L->getNumBlocks() == 1 &&
 5213 any_of(L->getHeader()->phis(),
 5214 [&SE, L](PHINode &Phi) {
 5215 return canParallelizeReductionWhenUnrolling(Phi, L, &SE);
 5216 }) &&
 5217 isLoopSizeWithinBudget(L, TTI, 12, nullptr);
 5218 if (HasParellelizableReductions &&
 5219 SE.getSmallConstantTripMultiple(L, L->getExitingBlock()) % 2 == 0) {
 5220 UP.Partial = true;
 5221 UP.MaxCount = 4;
 5222 UP.AddAdditionalAccumulators = true;
 5223 }
 5224
 // Give up when the trip count is known-small or cannot even be expressed
 // symbolically; runtime unrolling would not pay off.
 5225 const SCEV *BTC = SE.getSymbolicMaxBackedgeTakenCount(L);
 // NOTE(review): the first half of this condition (original line 5226) is
 // elided here -- presumably an isa<SCEVCouldNotCompute>(BTC) check; confirm.
 5227 (SE.getSmallConstantMaxTripCount(L) > 0 &&
 5228 SE.getSmallConstantMaxTripCount(L) <= 32))
 5229 return;
 5230
 // Vectorized loops (or remainders) are left to other heuristics.
 5231 if (findStringMetadataForLoop(L, "llvm.loop.isvectorized"))
 5232 return;
 // NOTE(review): original lines 5233-5234 (a further bail-out condition)
 // are elided in this rendering; confirm against the source.
 5235 return;
 5236
 5237 // Limit to loops with trip counts that are cheap to expand.
 5238 UP.SCEVExpansionBudget = 1;
 5239
 5240 if (HasParellelizableReductions) {
 5241 UP.Runtime = true;
 // NOTE(review): original line 5242 (likely setting a default runtime
 // unroll count) is elided in this rendering; confirm.
 5243 UP.AddAdditionalAccumulators = true;
 5244 }
 5245
 5246 // Try to unroll small loops, of few-blocks with low budget, if they have
 5247 // load/store dependencies, to expose more parallel memory access streams,
 5248 // or if they do little work inside a block (i.e. load -> X -> store pattern).
 5249 BasicBlock *Header = L->getHeader();
 5250 BasicBlock *Latch = L->getLoopLatch();
 5251 if (Header == Latch) {
 5252 // Estimate the size of the loop.
 5253 unsigned Size;
 5254 unsigned Width = 10;
 5255 if (!isLoopSizeWithinBudget(L, TTI, Width, &Size))
 5256 return;
 5257
 5258 // Try to find an unroll count that maximizes the use of the instruction
 5259 // window, i.e. trying to fetch as many instructions per cycle as possible.
 5260 unsigned MaxInstsPerLine = 16;
 5261 unsigned UC = 1;
 5262 unsigned BestUC = 1;
 5263 unsigned SizeWithBestUC = BestUC * Size;
 5264 while (UC <= 8) {
 5265 unsigned SizeWithUC = UC * Size;
 5266 if (SizeWithUC > 48)
 5267 break;
 5268 if ((SizeWithUC % MaxInstsPerLine) == 0 ||
 5269 (SizeWithBestUC % MaxInstsPerLine) < (SizeWithUC % MaxInstsPerLine)) {
 5270 BestUC = UC;
 5271 SizeWithBestUC = BestUC * Size;
 5272 }
 5273 UC++;
 5274 }
 5275
 5276 if (BestUC == 1)
 5277 return;
 5278
 // Collect loop-varying loads (plus their in-loop first users) and all
 // loop-varying stores, to detect load->...->store dependency chains.
 5279 SmallPtrSet<Value *, 8> LoadedValuesPlus;
 // NOTE(review): original line 5280 (the declaration of the Stores vector)
 // is elided in this rendering; confirm against the source.
 5281 for (auto *BB : L->blocks()) {
 5282 for (auto &I : *BB) {
 // NOTE(review): original line 5283 (extracting the load/store pointer
 // operand into Ptr) is elided in this rendering; confirm.
 5284 if (!Ptr)
 5285 continue;
 5286 const SCEV *PtrSCEV = SE.getSCEV(Ptr);
 5287 if (SE.isLoopInvariant(PtrSCEV, L))
 5288 continue;
 5289 if (isa<LoadInst>(&I)) {
 5290 LoadedValuesPlus.insert(&I);
 5291 // Include in-loop 1st users of loaded values.
 5292 for (auto *U : I.users())
 5293 if (L->contains(cast<Instruction>(U)))
 5294 LoadedValuesPlus.insert(U);
 5295 } else
 5296 Stores.push_back(cast<StoreInst>(&I));
 5297 }
 5298 }
 5299
 // Only unroll when at least one store consumes a loaded value (or a
 // first-level use of one), i.e. a load->store dependency exists.
 5300 if (none_of(Stores, [&LoadedValuesPlus](StoreInst *SI) {
 5301 return LoadedValuesPlus.contains(SI->getOperand(0));
 5302 }))
 5303 return;
 5304
 5305 UP.Runtime = true;
 5306 UP.DefaultUnrollRuntimeCount = BestUC;
 5307 return;
 5308 }
 5309
 5310 // Try to runtime-unroll loops with early-continues depending on loop-varying
 5311 // loads; this helps with branch-prediction for the early-continues.
 5312 auto *Term = dyn_cast<BranchInst>(Header->getTerminator());
 // NOTE(review): original line 5313 (the declaration of Preds, presumably
 // the latch's predecessors) is elided in this rendering; confirm.
 5314 if (!Term || !Term->isConditional() || Preds.size() == 1 ||
 5315 !llvm::is_contained(Preds, Header) ||
 5316 none_of(Preds, [L](BasicBlock *Pred) { return L->contains(Pred); }))
 5317 return;
 5318
 // Depth-limited walk of the operand tree: true iff the value transitively
 // depends on a load inside the loop.
 5319 std::function<bool(Instruction *, unsigned)> DependsOnLoopLoad =
 5320 [&](Instruction *I, unsigned Depth) -> bool {
 5321 if (isa<PHINode>(I) || L->isLoopInvariant(I) || Depth > 8)
 5322 return false;
 5323
 5324 if (isa<LoadInst>(I))
 5325 return true;
 5326
 5327 return any_of(I->operands(), [&](Value *V) {
 5328 auto *I = dyn_cast<Instruction>(V);
 5329 return I && DependsOnLoopLoad(I, Depth + 1);
 5330 });
 5331 };
 5332 CmpPredicate Pred;
 5333 Instruction *I;
 5334 if (match(Term, m_Br(m_ICmp(Pred, m_Instruction(I), m_Value()), m_Value(),
 5335 m_Value())) &&
 5336 DependsOnLoopLoad(I, 0)) {
 5337 UP.Runtime = true;
 5338 }
 5339}
5340
// Top-level AArch64 unrolling-preference hook: sets generic preferences,
// rejects loops with calls or vector code, then applies subtarget-specific
// tuning (Apple cores, Falkor, in-order cores, small multi-exit loops).
// NOTE(review): original lines 5341-5342 (the
// AArch64TTIImpl::getUnrollingPreferences signature start, taking Loop *L,
// ScalarEvolution &SE and the UP struct) are elided in this rendering.
 5343 OptimizationRemarkEmitter *ORE) const {
 5344 // Enable partial unrolling and runtime unrolling.
 5345 BaseT::getUnrollingPreferences(L, SE, UP, ORE);
 5346
 5347 UP.UpperBound = true;
 5348
 5349 // For inner loop, it is more likely to be a hot one, and the runtime check
 5350 // can be promoted out from LICM pass, so the overhead is less, let's try
 5351 // a larger threshold to unroll more loops.
 5352 if (L->getLoopDepth() > 1)
 5353 UP.PartialThreshold *= 2;
 5354
 5355 // Disable partial & runtime unrolling on -Os.
 // NOTE(review): original line 5356 (presumably
 // UP.PartialOptSizeThreshold = 0;) is elided in this rendering; confirm.
 5357
 5358 // Scan the loop: don't unroll loops with calls as this could prevent
 5359 // inlining. Don't unroll auto-vectorized loops either, though do allow
 5360 // unrolling of the scalar remainder.
 5361 bool IsVectorized = getBooleanLoopAttribute(L, "llvm.loop.isvectorized");
 // NOTE(review): original line 5362 (the Cost accumulator declaration) is
 // elided in this rendering; confirm against the source.
 5363 for (auto *BB : L->getBlocks()) {
 5364 for (auto &I : *BB) {
 5365 // Both auto-vectorized loops and the scalar remainder have the
 5366 // isvectorized attribute, so differentiate between them by the presence
 5367 // of vector instructions.
 5368 if (IsVectorized && I.getType()->isVectorTy())
 5369 return;
 5370 if (isa<CallBase>(I)) {
 // NOTE(review): original lines 5371-5372 (obtaining the called
 // Function *F, with intrinsic handling) are elided here; confirm.
 5373 if (!isLoweredToCall(F))
 5374 continue;
 5375 return;
 5376 }
 5377
 5378 SmallVector<const Value *, 4> Operands(I.operand_values());
 5379 Cost += getInstructionCost(&I, Operands,
 // NOTE(review): original line 5380 (the cost-kind argument, presumably
 // TargetTransformInfo::TCK_SizeAndLatency) is elided here; confirm.
 5381 }
 5382 }
 5383
 5384 // Apply subtarget-specific unrolling preferences.
 5385 if (ST->isAppleMLike())
 5386 getAppleRuntimeUnrollPreferences(L, SE, UP, *this);
 5387 else if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
 // NOTE(review): original lines 5388-5389 (the EnableFalkorHWPFUnrollFix
 // check and the getFalkorUnrollingPreferences call) are elided here.
 5390
 5391 // If this is a small, multi-exit loop similar to something like std::find,
 5392 // then there is typically a performance improvement achieved by unrolling.
 5393 if (!L->getExitBlock() && shouldUnrollMultiExitLoop(L, SE, *this)) {
 5394 UP.RuntimeUnrollMultiExit = true;
 5395 UP.Runtime = true;
 5396 // Limit unroll count.
 // NOTE(review): original line 5397 (setting UP.DefaultUnrollRuntimeCount)
 // is elided in this rendering; confirm against the source.
 5398 // Allow slightly more costly trip-count expansion to catch search loops
 5399 // with pointer inductions.
 5400 UP.SCEVExpansionBudget = 5;
 5401 return;
 5402 }
 5403
 5404 // Enable runtime unrolling for in-order models
 5405 // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by
 5406 // checking for that case, we can ensure that the default behaviour is
 5407 // unchanged
 5408 if (ST->getProcFamily() != AArch64Subtarget::Generic &&
 5409 !ST->getSchedModel().isOutOfOrder()) {
 5410 UP.Runtime = true;
 5411 UP.Partial = true;
 5412 UP.UnrollRemainder = true;
 // NOTE(review): original line 5413 (setting the default runtime unroll
 // count) is elided in this rendering; confirm against the source.
 5414
 5415 UP.UnrollAndJam = true;
 // NOTE(review): original line 5416 (setting the unroll-and-jam inner
 // loop threshold) is elided in this rendering; confirm.
 5417 }
 5418
 5419 // Force unrolling small loops can be very useful because of the branch
 5420 // taken cost of the backedge.
 // NOTE(review): original line 5421 (the size condition guarding Force) is
 // elided in this rendering; confirm against the source.
 5422 UP.Force = true;
 5423}
5424
// Returns (or, for stN, builds) a value of ExpectedType from a NEON
// structured load/store intrinsic so redundant memory ops can be eliminated:
// for st2/st3/st4 the stored operands are packed into a struct; for
// ld2/ld3/ld4 the intrinsic itself is returned when its type matches.
// NOTE(review): this line and the elided line 5430 carry the
// getOrCreateResultFromMemIntrinsic signature start in the original source.
 5429
 5431 Type *ExpectedType,
 5432 bool CanCreate) const {
 5433 switch (Inst->getIntrinsicID()) {
 5434 default:
 5435 return nullptr;
 5436 case Intrinsic::aarch64_neon_st2:
 5437 case Intrinsic::aarch64_neon_st3:
 5438 case Intrinsic::aarch64_neon_st4: {
 5439 // Create a struct type
 5440 StructType *ST = dyn_cast<StructType>(ExpectedType);
 5441 if (!CanCreate || !ST)
 5442 return nullptr;
 // The last argument is the pointer; the rest are the stored vectors.
 5443 unsigned NumElts = Inst->arg_size() - 1;
 5444 if (ST->getNumElements() != NumElts)
 5445 return nullptr;
 5446 for (unsigned i = 0, e = NumElts; i != e; ++i) {
 5447 if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
 5448 return nullptr;
 5449 }
 // Rebuild the struct value by inserting each stored operand in order.
 5450 Value *Res = PoisonValue::get(ExpectedType);
 5451 IRBuilder<> Builder(Inst);
 5452 for (unsigned i = 0, e = NumElts; i != e; ++i) {
 5453 Value *L = Inst->getArgOperand(i);
 5454 Res = Builder.CreateInsertValue(Res, L, i);
 5455 }
 5456 return Res;
 5457 }
 5458 case Intrinsic::aarch64_neon_ld2:
 5459 case Intrinsic::aarch64_neon_ld3:
 5460 case Intrinsic::aarch64_neon_ld4:
 5461 if (Inst->getType() == ExpectedType)
 5462 return Inst;
 5463 return nullptr;
 5464 }
 5465}
5466
// Describes NEON structured load/store intrinsics for the target-memory
// intrinsic framework: fills read/write flags and the pointer operand, and
// assigns a MatchingId so ldN can be CSE'd/forwarded against the matching
// stN. Returns true only for the handled ld2/3/4 and st2/3/4 intrinsics.
// NOTE(review): original line 5467 (the getTgtMemIntrinsic signature start,
// taking IntrinsicInst *Inst) is elided in this rendering.
 5468 MemIntrinsicInfo &Info) const {
 5469 switch (Inst->getIntrinsicID()) {
 5470 default:
 5471 break;
 5472 case Intrinsic::aarch64_neon_ld2:
 5473 case Intrinsic::aarch64_neon_ld3:
 5474 case Intrinsic::aarch64_neon_ld4:
 5475 Info.ReadMem = true;
 5476 Info.WriteMem = false;
 // For ldN the pointer is the first (only) argument.
 5477 Info.PtrVal = Inst->getArgOperand(0);
 5478 break;
 5479 case Intrinsic::aarch64_neon_st2:
 5480 case Intrinsic::aarch64_neon_st3:
 5481 case Intrinsic::aarch64_neon_st4:
 5482 Info.ReadMem = false;
 5483 Info.WriteMem = true;
 // For stN the pointer is the last argument, after the stored vectors.
 5484 Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
 5485 break;
 5486 }
 5487
 // Second switch pairs each ldN with its stN via a shared MatchingId.
 5488 switch (Inst->getIntrinsicID()) {
 5489 default:
 5490 return false;
 5491 case Intrinsic::aarch64_neon_ld2:
 5492 case Intrinsic::aarch64_neon_st2:
 5493 Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
 5494 break;
 5495 case Intrinsic::aarch64_neon_ld3:
 5496 case Intrinsic::aarch64_neon_st3:
 5497 Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
 5498 break;
 5499 case Intrinsic::aarch64_neon_ld4:
 5500 case Intrinsic::aarch64_neon_st4:
 5501 Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
 5502 break;
 5503 }
 5504 return true;
 5505}
5506
 5507/// See if \p I should be considered for address type promotion. We check if \p
 5508/// I is a sext with right type and used in memory accesses. If it used in a
 5509/// "complex" getelementptr, we allow it to be promoted without finding other
 5510/// sext instructions that sign extended the same initial value. A getelementptr
 5511/// is considered as "complex" if it has more than 2 operands.
// NOTE(review): original line 5512 (the shouldConsiderAddressTypePromotion
// signature start) is elided in this rendering.
 5513 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
 5514 bool Considerable = false;
 5515 AllowPromotionWithoutCommonHeader = false;
 // Only sign-extensions to i64 are candidates for address-type promotion.
 5516 if (!isa<SExtInst>(&I))
 5517 return false;
 5518 Type *ConsideredSExtType =
 5519 Type::getInt64Ty(I.getParent()->getParent()->getContext());
 5520 if (I.getType() != ConsideredSExtType)
 5521 return false;
 5522 // See if the sext is the one with the right type and used in at least one
 5523 // GetElementPtrInst.
 5524 for (const User *U : I.users()) {
 5525 if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
 5526 Considerable = true;
 5527 // A getelementptr is considered as "complex" if it has more than 2
 5528 // operands. We will promote a SExt used in such complex GEP as we
 5529 // expect some computation to be merged if they are done on 64 bits.
 5530 if (GEPInst->getNumOperands() > 2) {
 5531 AllowPromotionWithoutCommonHeader = true;
 5532 break;
 5533 }
 5534 }
 5535 }
 5536 return Considerable;
 5537}
5538
// Returns true when the given recurrence can be vectorized at VF: fixed-width
// VFs are always legal; scalable VFs additionally require a legal scalable
// element type (no bf16) and a supported recurrence kind.
// NOTE(review): original line 5539 (the isLegalToVectorizeReduction signature
// start) is elided in this rendering.
 5540 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
 5541 if (!VF.isScalable())
 5542 return true;
 5543
 5544 Type *Ty = RdxDesc.getRecurrenceType();
 5545 if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
 5546 return false;
 5547
 5548 switch (RdxDesc.getRecurrenceKind()) {
 5549 case RecurKind::Sub:
 // NOTE(review): original line 5550 (another RecurKind case label) is
 // elided in this rendering; confirm against the source.
 5551 case RecurKind::Add:
 5552 case RecurKind::FAdd:
 5553 case RecurKind::And:
 5554 case RecurKind::Or:
 5555 case RecurKind::Xor:
 5556 case RecurKind::SMin:
 5557 case RecurKind::SMax:
 5558 case RecurKind::UMin:
 5559 case RecurKind::UMax:
 5560 case RecurKind::FMin:
 5561 case RecurKind::FMax:
 5562 case RecurKind::FMulAdd:
 5563 case RecurKind::AnyOf:
 // NOTE(review): original line 5564 (another RecurKind case label) is
 // elided in this rendering; confirm against the source.
 5565 return true;
 5566 default:
 5567 return false;
 5568 }
 5569}
5570
// Cost of a horizontal min/max reduction: invalid for <vscale x 1 x T>,
// base-class cost for f16 without full fp16 support, otherwise the cost of
// (LT.first - 1) pairwise min/max legalization steps plus 2 for the final
// horizontal reduction.
// NOTE(review): original lines 5571-5572 (the getMinMaxReductionCost
// signature start with IID and Ty) are elided in this rendering.
 5573 FastMathFlags FMF,
// NOTE(review): original line 5574 (the CostKind parameter) is elided here.
 5575 // The code-generator is currently not able to handle scalable vectors
 5576 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
 5577 // it. This change will be removed when code-generation for these types is
 5578 // sufficiently reliable.
 5579 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
 5580 if (VTy->getElementCount() == ElementCount::getScalable(1))
 // NOTE(review): original line 5581 (return InstructionCost::getInvalid())
 // is elided in this rendering; confirm against the source.
 5582
 5583 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
 5584
 5585 if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
 5586 return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
 5587
 5588 InstructionCost LegalizationCost = 0;
 // When the type splits, each extra legal part needs one min/max combine.
 5589 if (LT.first > 1) {
 5590 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
 5591 IntrinsicCostAttributes Attrs(IID, LegalVTy, {LegalVTy, LegalVTy}, FMF);
 5592 LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
 5593 }
 5594
 5595 return LegalizationCost + /*Cost of horizontal reduction*/ 2;
 5596}
5597
// SVE arithmetic-reduction cost: (LT.first - 1) arithmetic combines for split
// types plus a flat 2 for the horizontal reduction of ADD/AND/OR/XOR/FADD.
// NOTE(review): original line 5598 (the getArithmeticReductionCostSVE
// signature start) is elided in this rendering.
 5599 unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) const {
 5600 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
 5601 InstructionCost LegalizationCost = 0;
 5602 if (LT.first > 1) {
 5603 Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
 5604 LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
 5605 LegalizationCost *= LT.first - 1;
 5606 }
 5607
 5608 int ISD = TLI->InstructionOpcodeToISD(Opcode);
 5609 assert(ISD && "Invalid opcode");
 5610 // Add the final reduction cost for the legal horizontal reduction
 5611 switch (ISD) {
 5612 case ISD::ADD:
 5613 case ISD::AND:
 5614 case ISD::OR:
 5615 case ISD::XOR:
 5616 case ISD::FADD:
 5617 return LegalizationCost + 2;
 5618 default:
 // NOTE(review): original line 5619 (return InstructionCost::getInvalid())
 // is elided in this rendering; confirm against the source.
 5620 }
 5621}
5622
// Arithmetic-reduction cost for AArch64: invalid for <vscale x 1 x T>,
// SVE-specific costing for scalable types, and a NEON cost table (addv /
// faddp chains, and the or/xor/and lowering sequences) for fixed vectors.
// NOTE(review): original lines 5623-5624 (the getArithmeticReductionCost
// signature start with Opcode and ValTy) are elided in this rendering.
 5625 std::optional<FastMathFlags> FMF,
// NOTE(review): original line 5626 (the CostKind parameter) is elided here.
 5627 // The code-generator is currently not able to handle scalable vectors
 5628 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
 5629 // it. This change will be removed when code-generation for these types is
 5630 // sufficiently reliable.
 5631 if (auto *VTy = dyn_cast<ScalableVectorType>(ValTy))
 5632 if (VTy->getElementCount() == ElementCount::getScalable(1))
 // NOTE(review): original line 5633 (return InstructionCost::getInvalid())
 // is elided in this rendering; confirm against the source.
 5634
 // NOTE(review): original line 5635 (the guard opening this block, likely
 // an ordered-FP-reduction / in-order check) is elided here; confirm.
 5636 if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
 5637 InstructionCost BaseCost =
 5638 BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
 5639 // Add on extra cost to reflect the extra overhead on some CPUs. We still
 5640 // end up vectorizing for more computationally intensive loops.
 5641 return BaseCost + FixedVTy->getNumElements();
 5642 }
 5643
 5644 if (Opcode != Instruction::FAdd)
 // NOTE(review): original line 5645 (return InstructionCost::getInvalid())
 // is elided in this rendering; confirm against the source.
 5646
 5647 auto *VTy = cast<ScalableVectorType>(ValTy);
 // NOTE(review): original line 5648 (the InstructionCost Cost declaration)
 // is elided in this rendering; confirm against the source.
 5649 getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
 5650 Cost *= getMaxNumElements(VTy->getElementCount());
 5651 return Cost;
 5652 }
 5653
 5654 if (isa<ScalableVectorType>(ValTy))
 5655 return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);
 5656
 5657 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
 5658 MVT MTy = LT.second;
 5659 int ISD = TLI->InstructionOpcodeToISD(Opcode);
 5660 assert(ISD && "Invalid opcode");
 5661
 5662 // Horizontal adds can use the 'addv' instruction. We model the cost of these
 5663 // instructions as twice a normal vector add, plus 1 for each legalization
 5664 // step (LT.first). This is the only arithmetic vector reduction operation for
 5665 // which we have an instruction.
 5666 // OR, XOR and AND costs should match the codegen from:
 5667 // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
 5668 // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
 5669 // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
 5670 static const CostTblEntry CostTblNoPairwise[]{
 5671 {ISD::ADD, MVT::v8i8, 2},
 5672 {ISD::ADD, MVT::v16i8, 2},
 5673 {ISD::ADD, MVT::v4i16, 2},
 5674 {ISD::ADD, MVT::v8i16, 2},
 5675 {ISD::ADD, MVT::v2i32, 2},
 5676 {ISD::ADD, MVT::v4i32, 2},
 5677 {ISD::ADD, MVT::v2i64, 2},
 5678 {ISD::OR, MVT::v8i8, 5}, // fmov + orr_lsr + orr_lsr + lsr + orr
 5679 {ISD::OR, MVT::v16i8, 7}, // ext + orr + same as v8i8
 5680 {ISD::OR, MVT::v4i16, 4}, // fmov + orr_lsr + lsr + orr
 5681 {ISD::OR, MVT::v8i16, 6}, // ext + orr + same as v4i16
 5682 {ISD::OR, MVT::v2i32, 3}, // fmov + lsr + orr
 5683 {ISD::OR, MVT::v4i32, 5}, // ext + orr + same as v2i32
 5684 {ISD::OR, MVT::v2i64, 3}, // ext + orr + fmov
 5685 {ISD::XOR, MVT::v8i8, 5}, // Same as above for or...
 5686 {ISD::XOR, MVT::v16i8, 7},
 5687 {ISD::XOR, MVT::v4i16, 4},
 5688 {ISD::XOR, MVT::v8i16, 6},
 5689 {ISD::XOR, MVT::v2i32, 3},
 5690 {ISD::XOR, MVT::v4i32, 5},
 5691 {ISD::XOR, MVT::v2i64, 3},
 5692 {ISD::AND, MVT::v8i8, 5}, // Same as above for or...
 5693 {ISD::AND, MVT::v16i8, 7},
 5694 {ISD::AND, MVT::v4i16, 4},
 5695 {ISD::AND, MVT::v8i16, 6},
 5696 {ISD::AND, MVT::v2i32, 3},
 5697 {ISD::AND, MVT::v4i32, 5},
 5698 {ISD::AND, MVT::v2i64, 3},
 5699 };
 5700 switch (ISD) {
 5701 default:
 5702 break;
 5703 case ISD::FADD:
 5704 if (Type *EltTy = ValTy->getScalarType();
 5705 // FIXME: For half types without fullfp16 support, this could extend and
 5706 // use a fp32 faddp reduction but current codegen unrolls.
 5707 MTy.isVector() && (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
 5708 (EltTy->isHalfTy() && ST->hasFullFP16()))) {
 5709 const unsigned NElts = MTy.getVectorNumElements();
 5710 if (ValTy->getElementCount().getFixedValue() >= 2 && NElts >= 2 &&
 5711 isPowerOf2_32(NElts))
 5712 // Reduction corresponding to series of fadd instructions is lowered to
 5713 // series of faddp instructions. faddp has latency/throughput that
 5714 // matches fadd instruction and hence, every faddp instruction can be
 5715 // considered to have a relative cost = 1 with
 5716 // CostKind = TCK_RecipThroughput.
 5717 // An faddp will pairwise add vector elements, so the size of input
 5718 // vector reduces by half every time, requiring
 5719 // #(faddp instructions) = log2_32(NElts).
 5720 return (LT.first - 1) + /*No of faddp instructions*/ Log2_32(NElts);
 5721 }
 5722 break;
 5723 case ISD::ADD:
 5724 if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
 5725 return (LT.first - 1) + Entry->Cost;
 5726 break;
 5727 case ISD::XOR:
 5728 case ISD::AND:
 5729 case ISD::OR:
 5730 const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
 5731 if (!Entry)
 5732 break;
 5733 auto *ValVTy = cast<FixedVectorType>(ValTy);
 5734 if (MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
 5735 isPowerOf2_32(ValVTy->getNumElements())) {
 5736 InstructionCost ExtraCost = 0;
 5737 if (LT.first != 1) {
 5738 // Type needs to be split, so there is an extra cost of LT.first - 1
 5739 // arithmetic ops.
 5740 auto *Ty = FixedVectorType::get(ValTy->getElementType(),
 5741 MTy.getVectorNumElements());
 5742 ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
 5743 ExtraCost *= LT.first - 1;
 5744 }
 5745 // All and/or/xor of i1 will be lowered with maxv/minv/addv + fmov
 5746 auto Cost = ValVTy->getElementType()->isIntegerTy(1) ? 2 : Entry->Cost;
 5747 return Cost + ExtraCost;
 5748 }
 5749 break;
 5750 }
 5751 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
 5752}
5753
// Cost of an extend-then-reduce (e.g. zext/sext feeding vecreduce.add):
// recognizes the patterns that lower to UADDLV (8/16/32-bit elements
// reducing to <=32 bits) and UADDLP (32 -> 64), costing them as 2 plus 2 per
// extra legalization step; everything else falls back to the base model.
// NOTE(review): original line 5754 (the getExtendedReductionCost signature
// start) is elided in this rendering.
 5755 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *VecTy,
 5756 std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
 5757 EVT VecVT = TLI->getValueType(DL, VecTy);
 5758 EVT ResVT = TLI->getValueType(DL, ResTy);
 5759
 5760 if (Opcode == Instruction::Add && VecVT.isSimple() && ResVT.isSimple() &&
 5761 VecVT.getSizeInBits() >= 64) {
 5762 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
 5763
 5764 // The legal cases are:
 5765 // UADDLV 8/16/32->32
 5766 // UADDLP 32->64
 5767 unsigned RevVTSize = ResVT.getSizeInBits();
 5768 if (((LT.second == MVT::v8i8 || LT.second == MVT::v16i8) &&
 5769 RevVTSize <= 32) ||
 5770 ((LT.second == MVT::v4i16 || LT.second == MVT::v8i16) &&
 5771 RevVTSize <= 32) ||
 5772 ((LT.second == MVT::v2i32 || LT.second == MVT::v4i32) &&
 5773 RevVTSize <= 64))
 5774 return (LT.first - 1) * 2 + 2;
 5775 }
 5776
 5777 return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, VecTy, FMF,
 5778 CostKind);
 5779}
5780
// Cost of a multiply-accumulate reduction (extend-mul-reduce). With the
// dot-product feature, i8 inputs accumulating into i32 lower to UDOT/SDOT
// plus a final uaddv, costed as LT.first + 2; otherwise the base model.
// NOTE(review): original line 5781 (the InstructionCost return type line) is
// elided in this rendering.
 5782AArch64TTIImpl::getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode,
 5783 Type *ResTy, VectorType *VecTy,
// NOTE(review): original line 5784 (the CostKind parameter) is elided here.
 5785 EVT VecVT = TLI->getValueType(DL, VecTy);
 5786 EVT ResVT = TLI->getValueType(DL, ResTy);
 5787
 5788 if (ST->hasDotProd() && VecVT.isSimple() && ResVT.isSimple() &&
 5789 RedOpcode == Instruction::Add) {
 5790 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
 5791
 5792 // The legal cases with dotprod are
 5793 // UDOT 8->32
 5794 // Which requires an additional uaddv to sum the i32 values.
 5795 if ((LT.second == MVT::v8i8 || LT.second == MVT::v16i8) &&
 5796 ResVT == MVT::i32)
 5797 return LT.first + 2;
 5798 }
 5799
 5800 return BaseT::getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, VecTy,
 5801 CostKind);
 5802}
5803
// Cost of an SVE vector splice (TTI::SK_Splice): one instruction per legal
// type from the table, plus compare/select costs when Index < 0 and
// zext/trunc costs when an i1 predicate type must be promoted.
// NOTE(review): original lines 5804-5806 (the getSpliceCost signature,
// taking the vector type Tp, the splice Index and CostKind) are elided in
// this rendering.
 5807 static const CostTblEntry ShuffleTbl[] = {
 5808 { TTI::SK_Splice, MVT::nxv16i8, 1 },
 5809 { TTI::SK_Splice, MVT::nxv8i16, 1 },
 5810 { TTI::SK_Splice, MVT::nxv4i32, 1 },
 5811 { TTI::SK_Splice, MVT::nxv2i64, 1 },
 5812 { TTI::SK_Splice, MVT::nxv2f16, 1 },
 5813 { TTI::SK_Splice, MVT::nxv4f16, 1 },
 5814 { TTI::SK_Splice, MVT::nxv8f16, 1 },
 5815 { TTI::SK_Splice, MVT::nxv2bf16, 1 },
 5816 { TTI::SK_Splice, MVT::nxv4bf16, 1 },
 5817 { TTI::SK_Splice, MVT::nxv8bf16, 1 },
 5818 { TTI::SK_Splice, MVT::nxv2f32, 1 },
 5819 { TTI::SK_Splice, MVT::nxv4f32, 1 },
 5820 { TTI::SK_Splice, MVT::nxv2f64, 1 },
 5821 };
 5822
 5823 // The code-generator is currently not able to handle scalable vectors
 5824 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
 5825 // it. This change will be removed when code-generation for these types is
 5826 // sufficiently reliable.
 // NOTE(review): original lines 5827-5828 (the <vscale x 1 x T> check and
 // the invalid-cost return) are elided in this rendering; confirm.
 5829
 5830 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
 5831 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
 5832 EVT PromotedVT = LT.second.getScalarType() == MVT::i1
 5833 ? TLI->getPromotedVTForPredicate(EVT(LT.second))
 5834 : LT.second;
 5835 Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
 5836 InstructionCost LegalizationCost = 0;
 // A negative Index (splice from the end) needs an icmp + select sequence.
 5837 if (Index < 0) {
 5838 LegalizationCost =
 5839 getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
 // NOTE(review): original line 5840 (the predicate/CostKind arguments of
 // the icmp cost query) is elided in this rendering; confirm.
 5841 getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
 // NOTE(review): original line 5842 (the predicate/CostKind arguments of
 // the select cost query) is elided in this rendering; confirm.
 5843 }
 5844
 5845 // Predicated splice are promoted when lowering. See AArch64ISelLowering.cpp
 5846 // Cost performed on a promoted type.
 5847 if (LT.second.getScalarType() == MVT::i1) {
 5848 LegalizationCost +=
 5849 getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
 // NOTE(review): original line 5850 (cast-context/CostKind arguments) is
 // elided in this rendering; confirm against the source.
 5851 getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
 // NOTE(review): original line 5852 (cast-context/CostKind arguments) is
 // elided in this rendering; confirm against the source.
 5853 }
 5854 const auto *Entry =
 5855 CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
 5856 assert(Entry && "Illegal Type for Splice");
 5857 LegalizationCost += Entry->Cost;
 5858 return LegalizationCost * LT.first;
5860
// Cost of a partial reduction (e.g. dot-product style accumulate): validates
// opcode/extension/binop combinations and target features (NEON dotprod,
// SVE/SME variants, i8mm for mixed-sign USDOT), then prices the natively
// supported accumulate/input type pairs, penalizing sub/negation, half-full
// inputs, and cases that need extra extends.
// NOTE(review): original line 5861 (the getPartialReductionCost signature
// start) is elided in this rendering.
 5862 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
// NOTE(review): original line 5863 (the ElementCount VF and OpAExtend
// parameters) is elided in this rendering; confirm against the source.
 5864 TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
 5865 TTI::TargetCostKind CostKind, std::optional<FastMathFlags> FMF) const {
 // NOTE(review): original line 5866 (the Invalid cost constant declaration)
 // is elided in this rendering; confirm against the source.
 5867
 // NOTE(review): original line 5868 (a guard condition, likely a cost-kind
 // check) is elided in this rendering; confirm against the source.
 5869 return Invalid;
 5870
 5871 if (VF.isFixed() && !ST->isSVEorStreamingSVEAvailable() &&
 5872 (!ST->isNeonAvailable() || !ST->hasDotProd()))
 5873 return Invalid;
 5874
 5875 if ((Opcode != Instruction::Add && Opcode != Instruction::Sub &&
 5876 Opcode != Instruction::FAdd) ||
 5877 OpAExtend == TTI::PR_None)
 5878 return Invalid;
 5879
 5880 // Floating-point partial reductions are invalid if `reassoc` and `contract`
 5881 // are not allowed.
 5882 if (AccumType->isFloatingPointTy()) {
 5883 assert(FMF && "Missing FastMathFlags for floating-point partial reduction");
 5884 if (!FMF->allowReassoc() || !FMF->allowContract())
 5885 return Invalid;
 5886 } else {
 5887 assert(!FMF &&
 5888 "FastMathFlags only apply to floating-point partial reductions");
 5889 }
 5890
 5891 assert((BinOp || (OpBExtend == TTI::PR_None && !InputTypeB)) &&
 5892 (!BinOp || (OpBExtend != TTI::PR_None && InputTypeB)) &&
 5893 "Unexpected values for OpBExtend or InputTypeB");
 5894
 5895 // We only support multiply binary operations for now, and for muls we
 5896 // require the types being extended to be the same.
 5897 if (BinOp && ((*BinOp != Instruction::Mul && *BinOp != Instruction::FMul) ||
 5898 InputTypeA != InputTypeB))
 5899 return Invalid;
 5900
 // Mixed-sign extends (USDOT) require the Int8 matrix-multiply extension.
 5901 bool IsUSDot = OpBExtend != TTI::PR_None && OpAExtend != OpBExtend;
 5902 if (IsUSDot && !ST->hasMatMulInt8())
 5903 return Invalid;
 5904
 // The VF must be strictly wider than the accumulate/input width ratio or
 // there is nothing to partially reduce.
 5905 unsigned Ratio =
 5906 AccumType->getScalarSizeInBits() / InputTypeA->getScalarSizeInBits();
 5907 if (VF.getKnownMinValue() <= Ratio)
 5908 return Invalid;
 5909
 5910 VectorType *InputVectorType = VectorType::get(InputTypeA, VF);
 5911 VectorType *AccumVectorType =
 5912 VectorType::get(AccumType, VF.divideCoefficientBy(Ratio));
 5913 // We don't yet support all kinds of legalization.
 5914 auto TC = TLI->getTypeConversion(AccumVectorType->getContext(),
 5915 EVT::getEVT(AccumVectorType));
 5916 switch (TC.first) {
 5917 default:
 5918 return Invalid;
 // NOTE(review): original lines 5919-5921 (the accepted LegalizeTypeAction
 // case labels) are elided in this rendering; confirm against the source.
 5922 // The legalised type (e.g. after splitting) must be legal too.
 5923 if (TLI->getTypeAction(AccumVectorType->getContext(), TC.second) !=
 // NOTE(review): original line 5924 (the TypeLegal enumerator compared
 // against) is elided in this rendering; confirm against the source.
 5925 return Invalid;
 5926 break;
 5927 }
 5928
 5929 std::pair<InstructionCost, MVT> AccumLT =
 5930 getTypeLegalizationCost(AccumVectorType);
 5931 std::pair<InstructionCost, MVT> InputLT =
 5932 getTypeLegalizationCost(InputVectorType);
 5933
 5934 InstructionCost Cost = InputLT.first * TTI::TCC_Basic;
 5935
 5936 // The sub/negation cannot be folded into the operands of
 5937 // ISD::PARTIAL_REDUCE_*MLA, so make the cost more expensive.
 5938 if (Opcode == Instruction::Sub)
 5939 Cost += 8;
 5940
 5941 // Prefer using full types by costing half-full input types as more expensive.
 5942 if (TypeSize::isKnownLT(InputVectorType->getPrimitiveSizeInBits(),
 // NOTE(review): original line 5943 (the second operand of the comparison,
 // likely a 128-bit TypeSize) is elided in this rendering; confirm.
 5944 // FIXME: This can be removed after the cost of the extends are folded into
 5945 // the dot-product expression in VPlan, after landing:
 5946 // https://github.com/llvm/llvm-project/pull/147302
 5947 Cost *= 2;
 5948
 5949 if (ST->isSVEorStreamingSVEAvailable() && !IsUSDot) {
 5950 // i16 -> i64 is natively supported for udot/sdot
 5951 if (AccumLT.second.getScalarType() == MVT::i64 &&
 5952 InputLT.second.getScalarType() == MVT::i16)
 5953 return Cost;
 5954 // i16 -> i32 is natively supported with SVE2p1
 5955 if (AccumLT.second.getScalarType() == MVT::i32 &&
 5956 InputLT.second.getScalarType() == MVT::i16 &&
 5957 (ST->hasSVE2p1() || ST->hasSME2()))
 5958 return Cost;
 5959 // i8 -> i64 is supported with an extra level of extends
 5960 if (AccumLT.second.getScalarType() == MVT::i64 &&
 5961 InputLT.second.getScalarType() == MVT::i8)
 5962 // FIXME: This cost should probably be a little higher, e.g. Cost + 2
 5963 // because it requires two extra extends on the inputs. But if we'd change
 5964 // that now, a regular reduction would be cheaper because the costs of
 5965 // the extends in the IR are still counted. This can be fixed
 5966 // after https://github.com/llvm/llvm-project/pull/147302 has landed.
 5967 return Cost;
 5968 }
 5969
 5970 // i8 -> i32 is natively supported for udot/sdot/usdot, both for NEON and SVE.
 5971 if (ST->isSVEorStreamingSVEAvailable() ||
 5972 (AccumLT.second.isFixedLengthVector() && ST->isNeonAvailable() &&
 5973 ST->hasDotProd())) {
 5974 if (AccumLT.second.getScalarType() == MVT::i32 &&
 5975 InputLT.second.getScalarType() == MVT::i8)
 5976 return Cost;
 5977 }
 5978
 5979 // f16 -> f32 is natively supported for fdot
 5980 if (Opcode == Instruction::FAdd && (ST->hasSME2() || ST->hasSVE2p1())) {
 5981 if (AccumLT.second.getScalarType() == MVT::f32 &&
 5982 InputLT.second.getScalarType() == MVT::f16 &&
 5983 AccumLT.second.getVectorMinNumElements() == 4 &&
 5984 InputLT.second.getVectorMinNumElements() == 8)
 5985 return Cost;
 5986 // Floating-point types aren't promoted, so expanding the partial reduction
 5987 // is more expensive.
 5988 return Cost + 20;
 5989 }
 5990
 5991 // Add additional cost for the extends that would need to be inserted.
 5992 return Cost + 2;
 5993}
5994
5997 VectorType *SrcTy, ArrayRef<int> Mask,
5998 TTI::TargetCostKind CostKind, int Index,
6000 const Instruction *CxtI) const {
6001 assert((Mask.empty() || DstTy->isScalableTy() ||
6002 Mask.size() == DstTy->getElementCount().getKnownMinValue()) &&
6003 "Expected the Mask to match the return size if given");
6004 assert(SrcTy->getScalarType() == DstTy->getScalarType() &&
6005 "Expected the same scalar types");
6006 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcTy);
6007
6008 // If we have a Mask, and the LT is being legalized somehow, split the Mask
6009 // into smaller vectors and sum the cost of each shuffle.
6010 if (!Mask.empty() && isa<FixedVectorType>(SrcTy) && LT.second.isVector() &&
6011 LT.second.getScalarSizeInBits() * Mask.size() > 128 &&
6012 SrcTy->getScalarSizeInBits() == LT.second.getScalarSizeInBits() &&
6013 Mask.size() > LT.second.getVectorNumElements() && !Index && !SubTp) {
6014 // Check for LD3/LD4 instructions, which are represented in llvm IR as
6015 // deinterleaving-shuffle(load). The shuffle cost could potentially be free,
6016 // but we model it with a cost of LT.first so that LD3/LD4 have a higher
6017 // cost than just the load.
6018 if (Args.size() >= 1 && isa<LoadInst>(Args[0]) &&
6021 return std::max<InstructionCost>(1, LT.first / 4);
6022
6023 // Check for ST3/ST4 instructions, which are represented in llvm IR as
6024 // store(interleaving-shuffle). The shuffle cost could potentially be free,
6025 // but we model it with a cost of LT.first so that ST3/ST4 have a higher
6026 // cost than just the store.
6027 if (CxtI && CxtI->hasOneUse() && isa<StoreInst>(*CxtI->user_begin()) &&
6029 Mask, 4, SrcTy->getElementCount().getKnownMinValue() * 2) ||
6031 Mask, 3, SrcTy->getElementCount().getKnownMinValue() * 2)))
6032 return LT.first;
6033
6034 unsigned TpNumElts = Mask.size();
6035 unsigned LTNumElts = LT.second.getVectorNumElements();
6036 unsigned NumVecs = (TpNumElts + LTNumElts - 1) / LTNumElts;
6037 VectorType *NTp = VectorType::get(SrcTy->getScalarType(),
6038 LT.second.getVectorElementCount());
6040 std::map<std::tuple<unsigned, unsigned, SmallVector<int>>, InstructionCost>
6041 PreviousCosts;
6042 for (unsigned N = 0; N < NumVecs; N++) {
6043 SmallVector<int> NMask;
6044 // Split the existing mask into chunks of size LTNumElts. Track the source
6045 // sub-vectors to ensure the result has at most 2 inputs.
6046 unsigned Source1 = -1U, Source2 = -1U;
6047 unsigned NumSources = 0;
6048 for (unsigned E = 0; E < LTNumElts; E++) {
6049 int MaskElt = (N * LTNumElts + E < TpNumElts) ? Mask[N * LTNumElts + E]
6051 if (MaskElt < 0) {
6053 continue;
6054 }
6055
6056 // Calculate which source from the input this comes from and whether it
6057 // is new to us.
6058 unsigned Source = MaskElt / LTNumElts;
6059 if (NumSources == 0) {
6060 Source1 = Source;
6061 NumSources = 1;
6062 } else if (NumSources == 1 && Source != Source1) {
6063 Source2 = Source;
6064 NumSources = 2;
6065 } else if (NumSources >= 2 && Source != Source1 && Source != Source2) {
6066 NumSources++;
6067 }
6068
6069 // Add to the new mask. For the NumSources>2 case these are not correct,
6070 // but are only used for the modular lane number.
6071 if (Source == Source1)
6072 NMask.push_back(MaskElt % LTNumElts);
6073 else if (Source == Source2)
6074 NMask.push_back(MaskElt % LTNumElts + LTNumElts);
6075 else
6076 NMask.push_back(MaskElt % LTNumElts);
6077 }
6078 // Check if we have already generated this sub-shuffle, which means we
6079 // will have already generated the output. For example a <16 x i32> splat
6080 // will be the same sub-splat 4 times, which only needs to be generated
6081 // once and reused.
6082 auto Result =
6083 PreviousCosts.insert({std::make_tuple(Source1, Source2, NMask), 0});
6084 // Check if it was already in the map (already costed).
6085 if (!Result.second)
6086 continue;
6087 // If the sub-mask has at most 2 input sub-vectors then re-cost it using
6088 // getShuffleCost. If not then cost it using the worst case as the number
6089 // of element moves into a new vector.
6090 InstructionCost NCost =
6091 NumSources <= 2
6092 ? getShuffleCost(NumSources <= 1 ? TTI::SK_PermuteSingleSrc
6094 NTp, NTp, NMask, CostKind, 0, nullptr, Args,
6095 CxtI)
6096 : LTNumElts;
6097 Result.first->second = NCost;
6098 Cost += NCost;
6099 }
6100 return Cost;
6101 }
6102
6103 Kind = improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp);
6104 bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
6105 // A subvector extract can be implemented with a NEON/SVE ext (or trivial
6106 // extract, if from lane 0) for 128-bit NEON vectors or legal SVE vectors.
6107 // This currently only handles low or high extracts to prevent SLP vectorizer
6108 // regressions.
6109 // Note that SVE's ext instruction is destructive, but it can be fused with
6110 // a movprfx to act like a constructive instruction.
6111 if (IsExtractSubvector && LT.second.isFixedLengthVector()) {
6112 if (LT.second.getFixedSizeInBits() >= 128 &&
6113 cast<FixedVectorType>(SubTp)->getNumElements() ==
6114 LT.second.getVectorNumElements() / 2) {
6115 if (Index == 0)
6116 return 0;
6117 if (Index == (int)LT.second.getVectorNumElements() / 2)
6118 return 1;
6119 }
6121 }
6122 // FIXME: This was added to keep the costs equal when adding DstTys. Update
6123 // the code to handle length-changing shuffles.
6124 if (Kind == TTI::SK_InsertSubvector) {
6125 LT = getTypeLegalizationCost(DstTy);
6126 SrcTy = DstTy;
6127 }
6128
6129 // Check for identity masks, which we can treat as free for both fixed and
6130 // scalable vector paths.
6131 if (!Mask.empty() && LT.second.isFixedLengthVector() &&
6132 (Kind == TTI::SK_PermuteTwoSrc || Kind == TTI::SK_PermuteSingleSrc) &&
6133 all_of(enumerate(Mask), [](const auto &M) {
6134 return M.value() < 0 || M.value() == (int)M.index();
6135 }))
6136 return 0;
6137
6138 // Segmented shuffle matching.
6139 if (Kind == TTI::SK_PermuteSingleSrc && isa<FixedVectorType>(SrcTy) &&
6140 !Mask.empty() && SrcTy->getPrimitiveSizeInBits().isNonZero() &&
6141 SrcTy->getPrimitiveSizeInBits().isKnownMultipleOf(
6143
6145 unsigned Segments =
6147 unsigned SegmentElts = VTy->getNumElements() / Segments;
6148
6149 // dupq zd.t, zn.t[idx]
6150 if ((ST->hasSVE2p1() || ST->hasSME2p1()) &&
6151 ST->isSVEorStreamingSVEAvailable() &&
6152 isDUPQMask(Mask, Segments, SegmentElts))
6153 return LT.first;
6154
6155 // mov zd.q, vn
6156 if (ST->isSVEorStreamingSVEAvailable() &&
6157 isDUPFirstSegmentMask(Mask, Segments, SegmentElts))
6158 return LT.first;
6159 }
6160
6161 // Check for broadcast loads, which are supported by the LD1R instruction.
6162 // In terms of code-size, the shuffle vector is free when a load + dup get
6163 // folded into a LD1R. That's what we check and return here. For performance
6164 // and reciprocal throughput, a LD1R is not completely free. In this case, we
6165 // return the cost for the broadcast below (i.e. 1 for most/all types), so
6166 // that we model the load + dup sequence slightly higher because LD1R is a
6167 // high latency instruction.
6168 if (CostKind == TTI::TCK_CodeSize && Kind == TTI::SK_Broadcast) {
6169 bool IsLoad = !Args.empty() && isa<LoadInst>(Args[0]);
6170 if (IsLoad && LT.second.isVector() &&
6171 isLegalBroadcastLoad(SrcTy->getElementType(),
6172 LT.second.getVectorElementCount()))
6173 return 0;
6174 }
6175
6176 // If we have 4 elements for the shuffle and a Mask, get the cost straight
6177 // from the perfect shuffle tables.
6178 if (Mask.size() == 4 &&
6179 SrcTy->getElementCount() == ElementCount::getFixed(4) &&
6180 (SrcTy->getScalarSizeInBits() == 16 ||
6181 SrcTy->getScalarSizeInBits() == 32) &&
6182 all_of(Mask, [](int E) { return E < 8; }))
6183 return getPerfectShuffleCost(Mask);
6184
6185 // Check for other shuffles that are not SK_ kinds but we have native
6186 // instructions for, for example ZIP and UZP.
6187 unsigned Unused;
6188 if (LT.second.isFixedLengthVector() &&
6189 LT.second.getVectorNumElements() == Mask.size() &&
6190 (Kind == TTI::SK_PermuteTwoSrc || Kind == TTI::SK_PermuteSingleSrc ||
6191 // Discrepancies between isTRNMask and ShuffleVectorInst::isTransposeMask
6192 // mean that we can end up with shuffles that satisfy isTRNMask, but end
6193 // up labelled as TTI::SK_InsertSubvector. (e.g. {2, 0}).
6194 Kind == TTI::SK_InsertSubvector) &&
6195 (isZIPMask(Mask, LT.second.getVectorNumElements(), Unused, Unused) ||
6196 isTRNMask(Mask, LT.second.getVectorNumElements(), Unused, Unused) ||
6197 isUZPMask(Mask, LT.second.getVectorNumElements(), Unused) ||
6198 isREVMask(Mask, LT.second.getScalarSizeInBits(),
6199 LT.second.getVectorNumElements(), 16) ||
6200 isREVMask(Mask, LT.second.getScalarSizeInBits(),
6201 LT.second.getVectorNumElements(), 32) ||
6202 isREVMask(Mask, LT.second.getScalarSizeInBits(),
6203 LT.second.getVectorNumElements(), 64) ||
6204 // Check for non-zero lane splats
6205 all_of(drop_begin(Mask),
6206 [&Mask](int M) { return M < 0 || M == Mask[0]; })))
6207 return 1;
6208
6209 if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
6210 Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
6211 Kind == TTI::SK_Reverse || Kind == TTI::SK_Splice) {
6212 static const CostTblEntry ShuffleTbl[] = {
6213 // Broadcast shuffle kinds can be performed with 'dup'.
6214 {TTI::SK_Broadcast, MVT::v8i8, 1},
6215 {TTI::SK_Broadcast, MVT::v16i8, 1},
6216 {TTI::SK_Broadcast, MVT::v4i16, 1},
6217 {TTI::SK_Broadcast, MVT::v8i16, 1},
6218 {TTI::SK_Broadcast, MVT::v2i32, 1},
6219 {TTI::SK_Broadcast, MVT::v4i32, 1},
6220 {TTI::SK_Broadcast, MVT::v2i64, 1},
6221 {TTI::SK_Broadcast, MVT::v4f16, 1},
6222 {TTI::SK_Broadcast, MVT::v8f16, 1},
6223 {TTI::SK_Broadcast, MVT::v4bf16, 1},
6224 {TTI::SK_Broadcast, MVT::v8bf16, 1},
6225 {TTI::SK_Broadcast, MVT::v2f32, 1},
6226 {TTI::SK_Broadcast, MVT::v4f32, 1},
6227 {TTI::SK_Broadcast, MVT::v2f64, 1},
6228 // Transpose shuffle kinds can be performed with 'trn1/trn2' and
6229 // 'zip1/zip2' instructions.
6230 {TTI::SK_Transpose, MVT::v8i8, 1},
6231 {TTI::SK_Transpose, MVT::v16i8, 1},
6232 {TTI::SK_Transpose, MVT::v4i16, 1},
6233 {TTI::SK_Transpose, MVT::v8i16, 1},
6234 {TTI::SK_Transpose, MVT::v2i32, 1},
6235 {TTI::SK_Transpose, MVT::v4i32, 1},
6236 {TTI::SK_Transpose, MVT::v2i64, 1},
6237 {TTI::SK_Transpose, MVT::v4f16, 1},
6238 {TTI::SK_Transpose, MVT::v8f16, 1},
6239 {TTI::SK_Transpose, MVT::v4bf16, 1},
6240 {TTI::SK_Transpose, MVT::v8bf16, 1},
6241 {TTI::SK_Transpose, MVT::v2f32, 1},
6242 {TTI::SK_Transpose, MVT::v4f32, 1},
6243 {TTI::SK_Transpose, MVT::v2f64, 1},
6244 // Select shuffle kinds.
6245 // TODO: handle vXi8/vXi16.
6246 {TTI::SK_Select, MVT::v2i32, 1}, // mov.
6247 {TTI::SK_Select, MVT::v4i32, 2}, // rev+trn (or similar).
6248 {TTI::SK_Select, MVT::v2i64, 1}, // mov.
6249 {TTI::SK_Select, MVT::v2f32, 1}, // mov.
6250 {TTI::SK_Select, MVT::v4f32, 2}, // rev+trn (or similar).
6251 {TTI::SK_Select, MVT::v2f64, 1}, // mov.
6252 // PermuteSingleSrc shuffle kinds.
6253 {TTI::SK_PermuteSingleSrc, MVT::v2i32, 1}, // mov.
6254 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 3}, // perfectshuffle worst case.
6255 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // mov.
6256 {TTI::SK_PermuteSingleSrc, MVT::v2f32, 1}, // mov.
6257 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 3}, // perfectshuffle worst case.
6258 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // mov.
6259 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 3}, // perfectshuffle worst case.
6260 {TTI::SK_PermuteSingleSrc, MVT::v4f16, 3}, // perfectshuffle worst case.
6261 {TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3}, // same
6262 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 8}, // constpool + load + tbl
6263 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 8}, // constpool + load + tbl
6264 {TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8}, // constpool + load + tbl
6265 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 8}, // constpool + load + tbl
6266 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 8}, // constpool + load + tbl
6267 // Reverse can be lowered with `rev`.
6268 {TTI::SK_Reverse, MVT::v2i32, 1}, // REV64
6269 {TTI::SK_Reverse, MVT::v4i32, 2}, // REV64; EXT
6270 {TTI::SK_Reverse, MVT::v2i64, 1}, // EXT
6271 {TTI::SK_Reverse, MVT::v2f32, 1}, // REV64
6272 {TTI::SK_Reverse, MVT::v4f32, 2}, // REV64; EXT
6273 {TTI::SK_Reverse, MVT::v2f64, 1}, // EXT
6274 {TTI::SK_Reverse, MVT::v8f16, 2}, // REV64; EXT
6275 {TTI::SK_Reverse, MVT::v8bf16, 2}, // REV64; EXT
6276 {TTI::SK_Reverse, MVT::v8i16, 2}, // REV64; EXT
6277 {TTI::SK_Reverse, MVT::v16i8, 2}, // REV64; EXT
6278 {TTI::SK_Reverse, MVT::v4f16, 1}, // REV64
6279 {TTI::SK_Reverse, MVT::v4bf16, 1}, // REV64
6280 {TTI::SK_Reverse, MVT::v4i16, 1}, // REV64
6281 {TTI::SK_Reverse, MVT::v8i8, 1}, // REV64
6282 // Splice can all be lowered as `ext`.
6283 {TTI::SK_Splice, MVT::v2i32, 1},
6284 {TTI::SK_Splice, MVT::v4i32, 1},
6285 {TTI::SK_Splice, MVT::v2i64, 1},
6286 {TTI::SK_Splice, MVT::v2f32, 1},
6287 {TTI::SK_Splice, MVT::v4f32, 1},
6288 {TTI::SK_Splice, MVT::v2f64, 1},
6289 {TTI::SK_Splice, MVT::v8f16, 1},
6290 {TTI::SK_Splice, MVT::v8bf16, 1},
6291 {TTI::SK_Splice, MVT::v8i16, 1},
6292 {TTI::SK_Splice, MVT::v16i8, 1},
6293 {TTI::SK_Splice, MVT::v4f16, 1},
6294 {TTI::SK_Splice, MVT::v4bf16, 1},
6295 {TTI::SK_Splice, MVT::v4i16, 1},
6296 {TTI::SK_Splice, MVT::v8i8, 1},
6297 // Broadcast shuffle kinds for scalable vectors
6298 {TTI::SK_Broadcast, MVT::nxv16i8, 1},
6299 {TTI::SK_Broadcast, MVT::nxv8i16, 1},
6300 {TTI::SK_Broadcast, MVT::nxv4i32, 1},
6301 {TTI::SK_Broadcast, MVT::nxv2i64, 1},
6302 {TTI::SK_Broadcast, MVT::nxv2f16, 1},
6303 {TTI::SK_Broadcast, MVT::nxv4f16, 1},
6304 {TTI::SK_Broadcast, MVT::nxv8f16, 1},
6305 {TTI::SK_Broadcast, MVT::nxv2bf16, 1},
6306 {TTI::SK_Broadcast, MVT::nxv4bf16, 1},
6307 {TTI::SK_Broadcast, MVT::nxv8bf16, 1},
6308 {TTI::SK_Broadcast, MVT::nxv2f32, 1},
6309 {TTI::SK_Broadcast, MVT::nxv4f32, 1},
6310 {TTI::SK_Broadcast, MVT::nxv2f64, 1},
6311 {TTI::SK_Broadcast, MVT::nxv16i1, 1},
6312 {TTI::SK_Broadcast, MVT::nxv8i1, 1},
6313 {TTI::SK_Broadcast, MVT::nxv4i1, 1},
6314 {TTI::SK_Broadcast, MVT::nxv2i1, 1},
6315 // Handle the cases for vector.reverse with scalable vectors
6316 {TTI::SK_Reverse, MVT::nxv16i8, 1},
6317 {TTI::SK_Reverse, MVT::nxv8i16, 1},
6318 {TTI::SK_Reverse, MVT::nxv4i32, 1},
6319 {TTI::SK_Reverse, MVT::nxv2i64, 1},
6320 {TTI::SK_Reverse, MVT::nxv2f16, 1},
6321 {TTI::SK_Reverse, MVT::nxv4f16, 1},
6322 {TTI::SK_Reverse, MVT::nxv8f16, 1},
6323 {TTI::SK_Reverse, MVT::nxv2bf16, 1},
6324 {TTI::SK_Reverse, MVT::nxv4bf16, 1},
6325 {TTI::SK_Reverse, MVT::nxv8bf16, 1},
6326 {TTI::SK_Reverse, MVT::nxv2f32, 1},
6327 {TTI::SK_Reverse, MVT::nxv4f32, 1},
6328 {TTI::SK_Reverse, MVT::nxv2f64, 1},
6329 {TTI::SK_Reverse, MVT::nxv16i1, 1},
6330 {TTI::SK_Reverse, MVT::nxv8i1, 1},
6331 {TTI::SK_Reverse, MVT::nxv4i1, 1},
6332 {TTI::SK_Reverse, MVT::nxv2i1, 1},
6333 };
6334 if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
6335 return LT.first * Entry->Cost;
6336 }
6337
6338 if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(SrcTy))
6339 return getSpliceCost(SrcTy, Index, CostKind);
6340
6341 // Inserting a subvector can often be done with either a D, S or H register
6342 // move, so long as the inserted vector is "aligned".
6343 if (Kind == TTI::SK_InsertSubvector && LT.second.isFixedLengthVector() &&
6344 LT.second.getSizeInBits() <= 128 && SubTp) {
6345 std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
6346 if (SubLT.second.isVector()) {
6347 int NumElts = LT.second.getVectorNumElements();
6348 int NumSubElts = SubLT.second.getVectorNumElements();
6349 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
6350 return SubLT.first;
6351 }
6352 }
6353
6354 // Restore optimal kind.
6355 if (IsExtractSubvector)
6357 return BaseT::getShuffleCost(Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp,
6358 Args, CxtI);
6359}
6360
6363 const DominatorTree &DT) {
6364 const auto &Strides = DenseMap<Value *, const SCEV *>();
6365 for (BasicBlock *BB : TheLoop->blocks()) {
6366 // Scan the instructions in the block and look for addresses that are
6367 // consecutive and decreasing.
6368 for (Instruction &I : *BB) {
6369 if (isa<LoadInst>(&I) || isa<StoreInst>(&I)) {
6371 Type *AccessTy = getLoadStoreType(&I);
6372 if (getPtrStride(*PSE, AccessTy, Ptr, TheLoop, DT, Strides,
6373 /*Assume=*/true, /*ShouldCheckWrap=*/false)
6374 .value_or(0) < 0)
6375 return true;
6376 }
6377 }
6378 }
6379 return false;
6380}
6381
6383 if (SVEPreferFixedOverScalableIfEqualCost.getNumOccurrences())
6385 // For cases like post-LTO vectorization, when we eventually know the trip
6386 // count, epilogue with fixed-width vectorization can be deleted if the trip
6387 // count is less than the epilogue iterations. That's why we prefer
6388 // fixed-width vectorization in epilogue in case of equal costs.
6389 if (IsEpilogue)
6390 return true;
6391 return ST->useFixedOverScalableIfEqualCost();
6392}
6393
6395 return ST->getEpilogueVectorizationMinVF();
6396}
6397
6399 if (!ST->hasSVE())
6400 return false;
6401
6402 // We don't currently support vectorisation with interleaving for SVE - with
6403 // such loops we're better off not using tail-folding. This gives us a chance
6404 // to fall back on fixed-width vectorisation using NEON's ld2/st2/etc.
6405 if (TFI->IAI->hasGroups())
6406 return false;
6407
6409 if (TFI->LVL->getReductionVars().size())
6411 if (TFI->LVL->getFixedOrderRecurrences().size())
6413
6414 // We call this to discover whether any load/store pointers in the loop have
6415 // negative strides. This will require extra work to reverse the loop
6416 // predicate, which may be expensive.
6419 *TFI->LVL->getDominatorTree()))
6423
6424 if (!TailFoldingOptionLoc.satisfies(ST->getSVETailFoldingDefaultOpts(),
6425 Required))
6426 return false;
6427
6428 // Don't tail-fold for tight loops where we would be better off interleaving
6429 // with an unpredicated loop.
6430 unsigned NumInsns = 0;
6431 for (BasicBlock *BB : TFI->LVL->getLoop()->blocks()) {
6432 NumInsns += BB->sizeWithoutDebug();
6433 }
6434
6435 // We expect 4 of these to be a IV PHI, IV add, IV compare and branch.
6436 return NumInsns >= SVETailFoldInsnThreshold;
6437}
6438
6441 StackOffset BaseOffset, bool HasBaseReg,
6442 int64_t Scale, unsigned AddrSpace) const {
6443 // Scaling factors are not free at all.
6444 // Operands | Rt Latency
6445 // -------------------------------------------
6446 // Rt, [Xn, Xm] | 4
6447 // -------------------------------------------
6448 // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5
6449 // Rt, [Xn, Wm, <extend> #imm] |
6451 AM.BaseGV = BaseGV;
6452 AM.BaseOffs = BaseOffset.getFixed();
6453 AM.HasBaseReg = HasBaseReg;
6454 AM.Scale = Scale;
6455 AM.ScalableOffset = BaseOffset.getScalable();
6456 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
6457 // Scale represents reg2 * scale, thus account for 1 if
6458 // it is not equal to 0 or 1.
6459 return AM.Scale != 0 && AM.Scale != 1;
6461}
6462
6464 const Instruction *I) const {
6466 // For the binary operators (e.g. or) we need to be more careful than
6467 // selects, here we only transform them if they are already at a natural
6468 // break point in the code - the end of a block with an unconditional
6469 // terminator.
6470 if (I->getOpcode() == Instruction::Or &&
6471 isa<BranchInst>(I->getNextNode()) &&
6472 cast<BranchInst>(I->getNextNode())->isUnconditional())
6473 return true;
6474
6475 if (I->getOpcode() == Instruction::Add ||
6476 I->getOpcode() == Instruction::Sub)
6477 return true;
6478 }
6480}
6481
6484 const TargetTransformInfo::LSRCost &C2) const {
6485 // AArch64 specific here is adding the number of instructions to the
6486 // comparison (though not as the first consideration, as some targets do)
6487 // along with changing the priority of the base additions.
6488 // TODO: Maybe a more nuanced tradeoff between instruction count
6489 // and number of registers? To be investigated at a later date.
6490 if (EnableLSRCostOpt)
6491 return std::tie(C1.NumRegs, C1.Insns, C1.NumBaseAdds, C1.AddRecCost,
6492 C1.NumIVMuls, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
6493 std::tie(C2.NumRegs, C2.Insns, C2.NumBaseAdds, C2.AddRecCost,
6494 C2.NumIVMuls, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
6495
6497}
6498
6499static bool isSplatShuffle(Value *V) {
6500 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V))
6501 return all_equal(Shuf->getShuffleMask());
6502 return false;
6503}
6504
6505/// Check if both Op1 and Op2 are shufflevector extracts of either the lower
6506/// or upper half of the vector elements.
6507static bool areExtractShuffleVectors(Value *Op1, Value *Op2,
6508 bool AllowSplat = false) {
6509 // Scalable types can't be extract shuffle vectors.
6510 if (Op1->getType()->isScalableTy() || Op2->getType()->isScalableTy())
6511 return false;
6512
6513 auto areTypesHalfed = [](Value *FullV, Value *HalfV) {
6514 auto *FullTy = FullV->getType();
6515 auto *HalfTy = HalfV->getType();
6516 return FullTy->getPrimitiveSizeInBits().getFixedValue() ==
6517 2 * HalfTy->getPrimitiveSizeInBits().getFixedValue();
6518 };
6519
6520 auto extractHalf = [](Value *FullV, Value *HalfV) {
6521 auto *FullVT = cast<FixedVectorType>(FullV->getType());
6522 auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
6523 return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
6524 };
6525
6526 ArrayRef<int> M1, M2;
6527 Value *S1Op1 = nullptr, *S2Op1 = nullptr;
6528 if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
6529 !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
6530 return false;
6531
6532 // If we allow splats, set S1Op1/S2Op1 to nullptr for the relevant arg so that
6533 // it is not checked as an extract below.
6534 if (AllowSplat && isSplatShuffle(Op1))
6535 S1Op1 = nullptr;
6536 if (AllowSplat && isSplatShuffle(Op2))
6537 S2Op1 = nullptr;
6538
6539 // Check that the operands are half as wide as the result and we extract
6540 // half of the elements of the input vectors.
6541 if ((S1Op1 && (!areTypesHalfed(S1Op1, Op1) || !extractHalf(S1Op1, Op1))) ||
6542 (S2Op1 && (!areTypesHalfed(S2Op1, Op2) || !extractHalf(S2Op1, Op2))))
6543 return false;
6544
6545 // Check the mask extracts either the lower or upper half of vector
6546 // elements.
6547 int M1Start = 0;
6548 int M2Start = 0;
6549 int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
6550 if ((S1Op1 &&
6551 !ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start)) ||
6552 (S2Op1 &&
6553 !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start)))
6554 return false;
6555
6556 if ((M1Start != 0 && M1Start != (NumElements / 2)) ||
6557 (M2Start != 0 && M2Start != (NumElements / 2)))
6558 return false;
6559 if (S1Op1 && S2Op1 && M1Start != M2Start)
6560 return false;
6561
6562 return true;
6563}
6564
6565/// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
6566/// of the vector elements.
6567static bool areExtractExts(Value *Ext1, Value *Ext2) {
6568 auto areExtDoubled = [](Instruction *Ext) {
6569 return Ext->getType()->getScalarSizeInBits() ==
6570 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
6571 };
6572
6573 if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
6574 !match(Ext2, m_ZExtOrSExt(m_Value())) ||
6575 !areExtDoubled(cast<Instruction>(Ext1)) ||
6576 !areExtDoubled(cast<Instruction>(Ext2)))
6577 return false;
6578
6579 return true;
6580}
6581
6582/// Check if Op could be used with vmull_high_p64 intrinsic.
6584 Value *VectorOperand = nullptr;
6585 ConstantInt *ElementIndex = nullptr;
6586 return match(Op, m_ExtractElt(m_Value(VectorOperand),
6587 m_ConstantInt(ElementIndex))) &&
6588 ElementIndex->getValue() == 1 &&
6589 isa<FixedVectorType>(VectorOperand->getType()) &&
6590 cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;
6591}
6592
6593/// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
6594static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {
6596}
6597
6599 // Restrict ourselves to the form CodeGenPrepare typically constructs.
6600 auto *GEP = dyn_cast<GetElementPtrInst>(Ptrs);
6601 if (!GEP || GEP->getNumOperands() != 2)
6602 return false;
6603
6604 Value *Base = GEP->getOperand(0);
6605 Value *Offsets = GEP->getOperand(1);
6606
6607 // We only care about scalar_base+vector_offsets.
6608 if (Base->getType()->isVectorTy() || !Offsets->getType()->isVectorTy())
6609 return false;
6610
6611 // Sink extends that would allow us to use 32-bit offset vectors.
6612 if (isa<SExtInst>(Offsets) || isa<ZExtInst>(Offsets)) {
6613 auto *OffsetsInst = cast<Instruction>(Offsets);
6614 if (OffsetsInst->getType()->getScalarSizeInBits() > 32 &&
6615 OffsetsInst->getOperand(0)->getType()->getScalarSizeInBits() <= 32)
6616 Ops.push_back(&GEP->getOperandUse(1));
6617 }
6618
6619 // Sink the GEP.
6620 return true;
6621}
6622
6623/// We want to sink following cases:
6624/// (add|sub|gep) A, ((mul|shl) vscale, imm); (add|sub|gep) A, vscale;
6625/// (add|sub|gep) A, ((mul|shl) zext(vscale), imm);
6627 if (match(Op, m_VScale()))
6628 return true;
6629 if (match(Op, m_Shl(m_VScale(), m_ConstantInt())) ||
6631 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
6632 return true;
6633 }
6634 if (match(Op, m_Shl(m_ZExt(m_VScale()), m_ConstantInt())) ||
6636 Value *ZExtOp = cast<Instruction>(Op)->getOperand(0);
6637 Ops.push_back(&cast<Instruction>(ZExtOp)->getOperandUse(0));
6638 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
6639 return true;
6640 }
6641 return false;
6642}
6643
6644static bool isFNeg(Value *Op) { return match(Op, m_FNeg(m_Value())); }
6645
6646/// Check if sinking \p I's operands to I's basic block is profitable, because
6647/// the operands can be folded into a target instruction, e.g.
6648/// shufflevectors extracts and/or sext/zext can be folded into (u,s)subl(2).
6652 switch (II->getIntrinsicID()) {
6653 case Intrinsic::aarch64_neon_smull:
6654 case Intrinsic::aarch64_neon_umull:
6655 if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1),
6656 /*AllowSplat=*/true)) {
6657 Ops.push_back(&II->getOperandUse(0));
6658 Ops.push_back(&II->getOperandUse(1));
6659 return true;
6660 }
6661 [[fallthrough]];
6662
6663 case Intrinsic::fma:
6664 case Intrinsic::fmuladd:
6665 if (isa<VectorType>(I->getType()) &&
6666 cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
6667 !ST->hasFullFP16())
6668 return false;
6669
6670 if (isFNeg(II->getOperand(0)))
6671 Ops.push_back(&II->getOperandUse(0));
6672 if (isFNeg(II->getOperand(1)))
6673 Ops.push_back(&II->getOperandUse(1));
6674
6675 [[fallthrough]];
6676 case Intrinsic::aarch64_neon_sqdmull:
6677 case Intrinsic::aarch64_neon_sqdmulh:
6678 case Intrinsic::aarch64_neon_sqrdmulh:
6679 // Sink splats for index lane variants
6680 if (isSplatShuffle(II->getOperand(0)))
6681 Ops.push_back(&II->getOperandUse(0));
6682 if (isSplatShuffle(II->getOperand(1)))
6683 Ops.push_back(&II->getOperandUse(1));
6684 return !Ops.empty();
6685 case Intrinsic::aarch64_neon_fmlal:
6686 case Intrinsic::aarch64_neon_fmlal2:
6687 case Intrinsic::aarch64_neon_fmlsl:
6688 case Intrinsic::aarch64_neon_fmlsl2:
6689 // Sink splats for index lane variants
6690 if (isSplatShuffle(II->getOperand(1)))
6691 Ops.push_back(&II->getOperandUse(1));
6692 if (isSplatShuffle(II->getOperand(2)))
6693 Ops.push_back(&II->getOperandUse(2));
6694 return !Ops.empty();
6695 case Intrinsic::aarch64_sve_ptest_first:
6696 case Intrinsic::aarch64_sve_ptest_last:
6697 if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0)))
6698 if (IIOp->getIntrinsicID() == Intrinsic::aarch64_sve_ptrue)
6699 Ops.push_back(&II->getOperandUse(0));
6700 return !Ops.empty();
6701 case Intrinsic::aarch64_sme_write_horiz:
6702 case Intrinsic::aarch64_sme_write_vert:
6703 case Intrinsic::aarch64_sme_writeq_horiz:
6704 case Intrinsic::aarch64_sme_writeq_vert: {
6705 auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
6706 if (!Idx || Idx->getOpcode() != Instruction::Add)
6707 return false;
6708 Ops.push_back(&II->getOperandUse(1));
6709 return true;
6710 }
6711 case Intrinsic::aarch64_sme_read_horiz:
6712 case Intrinsic::aarch64_sme_read_vert:
6713 case Intrinsic::aarch64_sme_readq_horiz:
6714 case Intrinsic::aarch64_sme_readq_vert:
6715 case Intrinsic::aarch64_sme_ld1b_vert:
6716 case Intrinsic::aarch64_sme_ld1h_vert:
6717 case Intrinsic::aarch64_sme_ld1w_vert:
6718 case Intrinsic::aarch64_sme_ld1d_vert:
6719 case Intrinsic::aarch64_sme_ld1q_vert:
6720 case Intrinsic::aarch64_sme_st1b_vert:
6721 case Intrinsic::aarch64_sme_st1h_vert:
6722 case Intrinsic::aarch64_sme_st1w_vert:
6723 case Intrinsic::aarch64_sme_st1d_vert:
6724 case Intrinsic::aarch64_sme_st1q_vert:
6725 case Intrinsic::aarch64_sme_ld1b_horiz:
6726 case Intrinsic::aarch64_sme_ld1h_horiz:
6727 case Intrinsic::aarch64_sme_ld1w_horiz:
6728 case Intrinsic::aarch64_sme_ld1d_horiz:
6729 case Intrinsic::aarch64_sme_ld1q_horiz:
6730 case Intrinsic::aarch64_sme_st1b_horiz:
6731 case Intrinsic::aarch64_sme_st1h_horiz:
6732 case Intrinsic::aarch64_sme_st1w_horiz:
6733 case Intrinsic::aarch64_sme_st1d_horiz:
6734 case Intrinsic::aarch64_sme_st1q_horiz: {
6735 auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
6736 if (!Idx || Idx->getOpcode() != Instruction::Add)
6737 return false;
6738 Ops.push_back(&II->getOperandUse(3));
6739 return true;
6740 }
6741 case Intrinsic::aarch64_neon_pmull:
6742 if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
6743 return false;
6744 Ops.push_back(&II->getOperandUse(0));
6745 Ops.push_back(&II->getOperandUse(1));
6746 return true;
6747 case Intrinsic::aarch64_neon_pmull64:
6748 if (!areOperandsOfVmullHighP64(II->getArgOperand(0),
6749 II->getArgOperand(1)))
6750 return false;
6751 Ops.push_back(&II->getArgOperandUse(0));
6752 Ops.push_back(&II->getArgOperandUse(1));
6753 return true;
6754 case Intrinsic::masked_gather:
6755 if (!shouldSinkVectorOfPtrs(II->getArgOperand(0), Ops))
6756 return false;
6757 Ops.push_back(&II->getArgOperandUse(0));
6758 return true;
6759 case Intrinsic::masked_scatter:
6760 if (!shouldSinkVectorOfPtrs(II->getArgOperand(1), Ops))
6761 return false;
6762 Ops.push_back(&II->getArgOperandUse(1));
6763 return true;
6764 default:
6765 return false;
6766 }
6767 }
6768
6769 auto ShouldSinkCondition = [](Value *Cond,
6770 SmallVectorImpl<Use *> &Ops) -> bool {
6772 return false;
6774 if (II->getIntrinsicID() != Intrinsic::vector_reduce_or ||
6775 !isa<ScalableVectorType>(II->getOperand(0)->getType()))
6776 return false;
6777 if (isa<CmpInst>(II->getOperand(0)))
6778 Ops.push_back(&II->getOperandUse(0));
6779 return true;
6780 };
6781
6782 switch (I->getOpcode()) {
6783 case Instruction::GetElementPtr:
6784 case Instruction::Add:
6785 case Instruction::Sub:
6786 // Sink vscales closer to uses for better isel
6787 for (unsigned Op = 0; Op < I->getNumOperands(); ++Op) {
6788 if (shouldSinkVScale(I->getOperand(Op), Ops)) {
6789 Ops.push_back(&I->getOperandUse(Op));
6790 return true;
6791 }
6792 }
6793 break;
6794 case Instruction::Select: {
6795 if (!ShouldSinkCondition(I->getOperand(0), Ops))
6796 return false;
6797
6798 Ops.push_back(&I->getOperandUse(0));
6799 return true;
6800 }
6801 case Instruction::Br: {
6802 if (cast<BranchInst>(I)->isUnconditional())
6803 return false;
6804
6805 if (!ShouldSinkCondition(cast<BranchInst>(I)->getCondition(), Ops))
6806 return false;
6807
6808 Ops.push_back(&I->getOperandUse(0));
6809 return true;
6810 }
6811 case Instruction::FMul:
6812 // fmul with contract flag can be combined with fadd into fma.
6813 // Sinking fneg into this block enables fmls pattern.
6814 if (cast<FPMathOperator>(I)->hasAllowContract()) {
6815 if (isFNeg(I->getOperand(0)))
6816 Ops.push_back(&I->getOperandUse(0));
6817 if (isFNeg(I->getOperand(1)))
6818 Ops.push_back(&I->getOperandUse(1));
6819 }
6820 break;
6821
6822 default:
6823 break;
6824 }
6825
6826 if (!I->getType()->isVectorTy())
6827 return !Ops.empty();
6828
6829 switch (I->getOpcode()) {
6830 case Instruction::Sub:
6831 case Instruction::Add: {
6832 if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
6833 return false;
6834
6835 // If the exts' operands extract either the lower or upper elements, we
6836 // can sink them too.
6837 auto Ext1 = cast<Instruction>(I->getOperand(0));
6838 auto Ext2 = cast<Instruction>(I->getOperand(1));
6839 if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) {
6840 Ops.push_back(&Ext1->getOperandUse(0));
6841 Ops.push_back(&Ext2->getOperandUse(0));
6842 }
6843
6844 Ops.push_back(&I->getOperandUse(0));
6845 Ops.push_back(&I->getOperandUse(1));
6846
6847 return true;
6848 }
6849 case Instruction::Or: {
6850 // Pattern: Or(And(MaskValue, A), And(Not(MaskValue), B)) ->
6851 // bitselect(MaskValue, A, B) where Not(MaskValue) = Xor(MaskValue, -1)
6852 if (ST->hasNEON()) {
6853 Instruction *OtherAnd, *IA, *IB;
6854 Value *MaskValue;
6855 // MainAnd refers to And instruction that has 'Not' as one of its operands
6856 if (match(I, m_c_Or(m_OneUse(m_Instruction(OtherAnd)),
6857 m_OneUse(m_c_And(m_OneUse(m_Not(m_Value(MaskValue))),
6858 m_Instruction(IA)))))) {
6859 if (match(OtherAnd,
6860 m_c_And(m_Specific(MaskValue), m_Instruction(IB)))) {
6861 Instruction *MainAnd = I->getOperand(0) == OtherAnd
6862 ? cast<Instruction>(I->getOperand(1))
6863 : cast<Instruction>(I->getOperand(0));
6864
6865 // Both Ands should be in same basic block as Or
6866 if (I->getParent() != MainAnd->getParent() ||
6867 I->getParent() != OtherAnd->getParent())
6868 return false;
6869
6870 // Non-mask operands of both Ands should also be in same basic block
6871 if (I->getParent() != IA->getParent() ||
6872 I->getParent() != IB->getParent())
6873 return false;
6874
6875 Ops.push_back(
6876 &MainAnd->getOperandUse(MainAnd->getOperand(0) == IA ? 1 : 0));
6877 Ops.push_back(&I->getOperandUse(0));
6878 Ops.push_back(&I->getOperandUse(1));
6879
6880 return true;
6881 }
6882 }
6883 }
6884
6885 return false;
6886 }
6887 case Instruction::Mul: {
6888 auto ShouldSinkSplatForIndexedVariant = [](Value *V) {
6889 auto *Ty = cast<VectorType>(V->getType());
6890 // For SVE the lane-indexing is within 128-bits, so we can't fold splats.
6891 if (Ty->isScalableTy())
6892 return false;
6893
6894 // Indexed variants of Mul exist for i16 and i32 element types only.
6895 return Ty->getScalarSizeInBits() == 16 || Ty->getScalarSizeInBits() == 32;
6896 };
6897
6898 int NumZExts = 0, NumSExts = 0;
6899 for (auto &Op : I->operands()) {
6900 // Make sure we are not already sinking this operand
6901 if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
6902 continue;
6903
6904 if (match(&Op, m_ZExtOrSExt(m_Value()))) {
6905 auto *Ext = cast<Instruction>(Op);
6906 auto *ExtOp = Ext->getOperand(0);
6907 if (isSplatShuffle(ExtOp) && ShouldSinkSplatForIndexedVariant(ExtOp))
6908 Ops.push_back(&Ext->getOperandUse(0));
6909 Ops.push_back(&Op);
6910
6911 if (isa<SExtInst>(Ext)) {
6912 NumSExts++;
6913 } else {
6914 NumZExts++;
6915 // A zext(a) is also a sext(zext(a)), if we take more than 2 steps.
6916 if (Ext->getOperand(0)->getType()->getScalarSizeInBits() * 2 <
6917 I->getType()->getScalarSizeInBits())
6918 NumSExts++;
6919 }
6920
6921 continue;
6922 }
6923
6925 if (!Shuffle)
6926 continue;
6927
6928 // If the Shuffle is a splat and the operand is a zext/sext, sinking the
6929 // operand and the s/zext can help create indexed s/umull. This is
6930 // especially useful to prevent i64 mul being scalarized.
6931 if (isSplatShuffle(Shuffle) &&
6932 match(Shuffle->getOperand(0), m_ZExtOrSExt(m_Value()))) {
6933 Ops.push_back(&Shuffle->getOperandUse(0));
6934 Ops.push_back(&Op);
6935 if (match(Shuffle->getOperand(0), m_SExt(m_Value())))
6936 NumSExts++;
6937 else
6938 NumZExts++;
6939 continue;
6940 }
6941
6942 Value *ShuffleOperand = Shuffle->getOperand(0);
6943 InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
6944 if (!Insert)
6945 continue;
6946
6947 Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1));
6948 if (!OperandInstr)
6949 continue;
6950
6951 ConstantInt *ElementConstant =
6952 dyn_cast<ConstantInt>(Insert->getOperand(2));
6953 // Check that the insertelement is inserting into element 0
6954 if (!ElementConstant || !ElementConstant->isZero())
6955 continue;
6956
6957 unsigned Opcode = OperandInstr->getOpcode();
6958 if (Opcode == Instruction::SExt)
6959 NumSExts++;
6960 else if (Opcode == Instruction::ZExt)
6961 NumZExts++;
6962 else {
6963 // If we find that the top bits are known 0, then we can sink and allow
6964 // the backend to generate a umull.
6965 unsigned Bitwidth = I->getType()->getScalarSizeInBits();
6966 APInt UpperMask = APInt::getHighBitsSet(Bitwidth, Bitwidth / 2);
6967 if (!MaskedValueIsZero(OperandInstr, UpperMask, DL))
6968 continue;
6969 NumZExts++;
6970 }
6971
6972 // And(Load) is excluded to prevent CGP getting stuck in a loop of sinking
6973 // the And, just to hoist it again back to the load.
6974 if (!match(OperandInstr, m_And(m_Load(m_Value()), m_Value())))
6975 Ops.push_back(&Insert->getOperandUse(1));
6976 Ops.push_back(&Shuffle->getOperandUse(0));
6977 Ops.push_back(&Op);
6978 }
6979
6980 // It is profitable to sink if we found two of the same type of extends.
6981 if (!Ops.empty() && (NumSExts == 2 || NumZExts == 2))
6982 return true;
6983
6984 // Otherwise, see if we should sink splats for indexed variants.
6985 if (!ShouldSinkSplatForIndexedVariant(I))
6986 return false;
6987
6988 Ops.clear();
6989 if (isSplatShuffle(I->getOperand(0)))
6990 Ops.push_back(&I->getOperandUse(0));
6991 if (isSplatShuffle(I->getOperand(1)))
6992 Ops.push_back(&I->getOperandUse(1));
6993
6994 return !Ops.empty();
6995 }
6996 case Instruction::FMul: {
6997 // For SVE the lane-indexing is within 128-bits, so we can't fold splats.
6998 if (I->getType()->isScalableTy())
6999 return !Ops.empty();
7000
7001 if (cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
7002 !ST->hasFullFP16())
7003 return !Ops.empty();
7004
7005 // Sink splats for index lane variants
7006 if (isSplatShuffle(I->getOperand(0)))
7007 Ops.push_back(&I->getOperandUse(0));
7008 if (isSplatShuffle(I->getOperand(1)))
7009 Ops.push_back(&I->getOperandUse(1));
7010 return !Ops.empty();
7011 }
7012 default:
7013 return false;
7014 }
7015 return false;
7016}
static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N)
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
static std::optional< Instruction * > instCombinePTrue(InstCombiner &IC, IntrinsicInst &II)
TailFoldingOption TailFoldingOptionLoc
static std::optional< Instruction * > instCombineSVEVectorFAdd(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFuseMulAddSub(InstCombiner &IC, IntrinsicInst &II, bool MergeIntoAddendOp)
static void getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE, TargetTransformInfo::UnrollingPreferences &UP)
bool SimplifyValuePattern(SmallVector< Value * > &Vec, bool AllowPoison)
static std::optional< Instruction * > instCombineSVESel(InstCombiner &IC, IntrinsicInst &II)
static bool hasPossibleIncompatibleOps(const Function *F, const AArch64TargetLowering &TLI)
Returns true if the function has explicit operations that can only be lowered using incompatible inst...
static bool shouldSinkVScale(Value *Op, SmallVectorImpl< Use * > &Ops)
We want to sink following cases: (add|sub|gep) A, ((mul|shl) vscale, imm); (add|sub|gep) A,...
static InstructionCost getHistogramCost(const AArch64Subtarget *ST, const IntrinsicCostAttributes &ICA)
static std::optional< Instruction * > tryCombineFromSVBoolBinOp(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEUnpack(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< unsigned > SVETailFoldInsnThreshold("sve-tail-folding-insn-threshold", cl::init(15), cl::Hidden)
static cl::opt< bool > EnableFixedwidthAutovecInStreamingMode("enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden)
static void getAppleRuntimeUnrollPreferences(Loop *L, ScalarEvolution &SE, TargetTransformInfo::UnrollingPreferences &UP, const AArch64TTIImpl &TTI)
For Apple CPUs, we want to runtime-unroll loops to make better use of the OOO engine's wide instructi...
static std::optional< Instruction * > instCombineWhilelo(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFAddU(InstCombiner &IC, IntrinsicInst &II)
static bool areExtractExts(Value *Ext1, Value *Ext2)
Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.
static cl::opt< bool > EnableLSRCostOpt("enable-aarch64-lsr-cost-opt", cl::init(true), cl::Hidden)
static bool shouldSinkVectorOfPtrs(Value *Ptrs, SmallVectorImpl< Use * > &Ops)
static bool shouldUnrollMultiExitLoop(Loop *L, ScalarEvolution &SE, const AArch64TTIImpl &TTI)
static std::optional< Instruction * > simplifySVEIntrinsicBinOp(InstCombiner &IC, IntrinsicInst &II, const SVEIntrinsicInfo &IInfo)
static std::optional< Instruction * > instCombineSVEVectorSub(InstCombiner &IC, IntrinsicInst &II)
static bool isLoopSizeWithinBudget(Loop *L, const AArch64TTIImpl &TTI, InstructionCost Budget, unsigned *FinalSize)
static std::optional< Instruction * > instCombineLD1GatherIndex(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFSub(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > processPhiNode(InstCombiner &IC, IntrinsicInst &II)
The function will remove redundant reinterprets casting in the presence of the control flow.
static std::optional< Instruction * > instCombineSVEInsr(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSMECntsd(InstCombiner &IC, IntrinsicInst &II, const AArch64Subtarget *ST)
static void extractAttrFeatures(const Function &F, const AArch64TTIImpl *TTI, SmallVectorImpl< StringRef > &Features)
static std::optional< Instruction * > instCombineST1ScatterIndex(InstCombiner &IC, IntrinsicInst &II)
static bool isSMEABIRoutineCall(const CallInst &CI, const AArch64TargetLowering &TLI)
static std::optional< Instruction * > instCombineSVESDIV(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL)
static Value * stripInactiveLanes(Value *V, const Value *Pg)
static cl::opt< bool > SVEPreferFixedOverScalableIfEqualCost("sve-prefer-fixed-over-scalable-if-equal", cl::Hidden)
static bool isUnpackedVectorVT(EVT VecVT)
static std::optional< Instruction * > instCombineSVEDupX(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVECmpNE(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineDMB(InstCombiner &IC, IntrinsicInst &II)
static SVEIntrinsicInfo constructSVEIntrinsicInfo(IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorFSubU(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineRDFFR(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineMaxMinNM(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< unsigned > SVEGatherOverhead("sve-gather-overhead", cl::init(10), cl::Hidden)
static std::optional< Instruction * > instCombineSVECondLast(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEPTest(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEZip(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< int > Aarch64ForceUnrollThreshold("aarch64-force-unroll-threshold", cl::init(0), cl::Hidden, cl::desc("Threshold for forced unrolling of small loops in AArch64"))
static std::optional< Instruction * > instCombineSVEDup(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< unsigned > BaseHistCntCost("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden, cl::desc("The cost of a histcnt instruction"))
static std::optional< Instruction * > instCombineConvertFromSVBool(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< unsigned > CallPenaltyChangeSM("call-penalty-sm-change", cl::init(5), cl::Hidden, cl::desc("Penalty of calling a function that requires a change to PSTATE.SM"))
static std::optional< Instruction * > instCombineSVEUzp1(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVEVectorBinOp(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< bool > EnableScalableAutovecInStreamingMode("enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden)
static std::optional< Instruction * > instCombineSVETBL(InstCombiner &IC, IntrinsicInst &II)
static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2)
Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
static bool isFNeg(Value *Op)
static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic)
static bool containsDecreasingPointers(Loop *TheLoop, PredicatedScalarEvolution *PSE, const DominatorTree &DT)
static bool isSplatShuffle(Value *V)
static cl::opt< unsigned > InlineCallPenaltyChangeSM("inline-call-penalty-sm-change", cl::init(10), cl::Hidden, cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"))
static std::optional< Instruction * > instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL)
static std::optional< Instruction * > instCombineSVESrshl(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< unsigned > DMBLookaheadThreshold("dmb-lookahead-threshold", cl::init(10), cl::Hidden, cl::desc("The number of instructions to search for a redundant dmb"))
static std::optional< Instruction * > simplifySVEIntrinsic(InstCombiner &IC, IntrinsicInst &II, const SVEIntrinsicInfo &IInfo)
static unsigned getSVEGatherScatterOverhead(unsigned Opcode, const AArch64Subtarget *ST)
static bool isOperandOfVmullHighP64(Value *Op)
Check if Op could be used with vmull_high_p64 intrinsic.
static std::optional< Instruction * > instCombineInStreamingMode(InstCombiner &IC, IntrinsicInst &II)
static std::optional< Instruction * > instCombineSVELast(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< unsigned > NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10), cl::Hidden)
static cl::opt< bool > EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix", cl::init(true), cl::Hidden)
static std::optional< Instruction * > instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts)
static std::optional< Instruction * > instCombineSVEUxt(InstCombiner &IC, IntrinsicInst &II, unsigned NumBits)
static cl::opt< TailFoldingOption, true, cl::parser< std::string > > SVETailFolding("sve-tail-folding", cl::desc("Control the use of vectorisation using tail-folding for SVE where the" " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:" "\ndisabled (Initial) No loop types will vectorize using " "tail-folding" "\ndefault (Initial) Uses the default tail-folding settings for " "the target CPU" "\nall (Initial) All legal loop types will vectorize using " "tail-folding" "\nsimple (Initial) Use tail-folding for simple loops (not " "reductions or recurrences)" "\nreductions Use tail-folding for loops containing reductions" "\nnoreductions Inverse of above" "\nrecurrences Use tail-folding for loops containing fixed order " "recurrences" "\nnorecurrences Inverse of above" "\nreverse Use tail-folding for loops requiring reversed " "predicates" "\nnoreverse Inverse of above"), cl::location(TailFoldingOptionLoc))
static bool areExtractShuffleVectors(Value *Op1, Value *Op2, bool AllowSplat=false)
Check if both Op1 and Op2 are shufflevector extracts of either the lower or upper half of the vector ...
static std::optional< Instruction * > instCombineSVEVectorAdd(InstCombiner &IC, IntrinsicInst &II)
static cl::opt< bool > EnableOrLikeSelectOpt("enable-aarch64-or-like-select", cl::init(true), cl::Hidden)
static cl::opt< unsigned > SVEScatterOverhead("sve-scatter-overhead", cl::init(10), cl::Hidden)
static std::optional< Instruction * > instCombineSVEDupqLane(InstCombiner &IC, IntrinsicInst &II)
This file defines a TargetTransformInfoImplBase conforming object specific to the AArch64 target machine.
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file provides a helper that implements much of the TTI interface in terms of the target-independ...
static Error reportError(StringRef Message)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
Cost tables and simple lookup functions.
This file defines the DenseMap class.
@ Default
static Value * getCondition(Instruction *I)
Hexagon Common GEP
const HexagonInstrInfo * TII
#define _
This file provides the interface for the instcombine pass implementation.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static LVOptions Options
Definition LVOptions.cpp:25
This file defines the LoopVectorizationLegality class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static const Function * getCalledFunction(const Value *V)
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static uint64_t getBits(uint64_t Val, int Start, int End)
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
This pass exposes codegen information to IR-level passes.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Value * RHS
Value * LHS
BinaryOperator * Mul
unsigned getVectorInsertExtractBaseCost() const
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const override
InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const override
bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst, Type *Src) const
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getIntImmCost(int64_t Val) const
Calculate the cost of materializing a 64-bit value.
std::optional< InstructionCost > getFP16BF16PromoteCost(Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, bool IncludeTrunc, bool CanUseSVE, std::function< InstructionCost(Type *)> InstCost) const
FP16 and BF16 operations are lowered to fptrunc(op(fpext, fpext) if the architecture features are not...
bool prefersVectorizedAddressing() const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const override
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const override
bool isElementTypeLegalForScalableVector(Type *Ty) const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, TTI::PartialReductionExtendKind OpAExtend, TTI::PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
APInt getPriorityMask(const Function &F) const override
bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const override
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
bool useNeonVector(const Type *Ty) const
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
unsigned getMaxNumElements(ElementCount VF) const
Try to return an estimate cost factor that can be used as a multiplier when scalarizing an operation ...
bool shouldTreatInstructionLikeSelect(const Instruction *I) const override
bool isMultiversionedFunction(const Function &F) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const override
TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override
InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const override
bool isLegalMaskedGatherScatter(Type *DataType) const
bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const override
See if I should be considered for address type promotion.
APInt getFeatureMask(const Function &F) const override
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const override
bool enableScalableVectorization() const override
InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override
Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const override
bool hasKnownLowerThroughputFromSchedulingModel(unsigned Opcode1, unsigned Opcode2) const
Check whether Opcode1 has less throughput according to the scheduling model than Opcode2.
unsigned getEpilogueVectorizationMinVF() const override
InstructionCost getSpliceCost(VectorType *Tp, int Index, TTI::TargetCostKind CostKind) const
InstructionCost getArithmeticReductionCostSVE(unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) const
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const override
Class for arbitrary precision integers.
Definition APInt.h:78
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition APInt.h:450
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1685
unsigned countLeadingOnes() const
Definition APInt.h:1639
void negate()
Negate this APInt in place.
Definition APInt.h:1483
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1052
unsigned logBase2() const
Definition APInt.h:1776
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
bool isTypeLegal(Type *Ty) const override
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:219
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isUnsigned() const
Definition InstrTypes.h:936
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:771
bool empty() const
Definition DenseMap.h:109
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This provides a helper for copying FMF from an instruction or setting specified flags.
Definition IRBuilder.h:93
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
bool allowContract() const
Definition FMF.h:72
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:802
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2561
CallInst * CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, Value *Idx, const Twine &Name="")
Create a call to the vector.insert intrinsic.
Definition IRBuilder.h:1110
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2549
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition IRBuilder.h:574
Type * getDoubleTy()
Fetch the type representing a 64-bit floating point value.
Definition IRBuilder.h:594
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcast to NumElts elements.
LLVM_ABI CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:561
Type * getHalfTy()
Fetch the type representing a 16-bit floating point value.
Definition IRBuilder.h:579
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1944
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition IRBuilder.h:527
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2258
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2473
Value * CreateBinOpFMF(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, FMFSource FMFSource, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1717
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2176
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1854
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition IRBuilder.h:2583
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1867
LLVM_ABI CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Type * getFloatTy()
Fetch the type representing a 32-bit floating point value.
Definition IRBuilder.h:589
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2249
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
LLVM_ABI Value * CreateElementCount(Type *Ty, ElementCount EC)
Create an expression which evaluates to the number of elements in EC at runtime.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2787
This instruction inserts a single (scalar) element into a VectorType value.
The core instruction combiner logic.
virtual Instruction * eraseInstFromFunction(Instruction &I)=0
Combiner aware instruction erasure.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
BuilderTy & Builder
static InstructionCost getInvalid(CostType Val=0)
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
bool isBinaryOp() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Class to represent integer types.
bool hasGroups() const
Returns true if we have any interleave groups.
const SmallVectorImpl< Type * > & getArgTypes() const
const SmallVectorImpl< const Value * > & getArgs() const
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Value * getPointerOperand()
iterator_range< block_iterator > blocks() const
RecurrenceSet & getFixedOrderRecurrences()
Return the fixed-order recurrences found in the loop.
PredicatedScalarEvolution * getPredicatedScalarEvolution() const
const ReductionList & getReductionVars() const
Returns the reduction variables found in the loop.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
const FeatureBitset & getFeatureBits() const
Machine Value Type.
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
size_type size() const
Definition MapVector.h:56
Information for memory intrinsic cost model.
const Instruction * getInst() const
The optimization diagnostic interface.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
Type * getRecurrenceType() const
Returns the type of the recurrence.
RecurKind getRecurrenceKind() const
This node represents a polynomial recurrence on the trip count of the specified loop.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents an analyzed expression in the program.
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasNonStreamingInterfaceAndBody() const
bool hasStreamingCompatibleInterface() const
bool hasStreamingInterfaceOrBody() const
bool isSMEABIRoutine() const
bool hasStreamingBody() const
void set(unsigned M, bool Enable=true)
SMECallAttrs is a utility class to hold the SMEAttrs for a callsite.
bool requiresPreservingZT0() const
bool requiresPreservingAllZAState() const
static LLVM_ABI ScalableVectorType * get(Type *ElementType, unsigned MinNumElts)
Definition Type.cpp:824
static ScalableVectorType * getDoubleElementsVectorType(ScalableVectorType *VTy)
The main scalar evolution driver.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e. an upper bound of) the exact backedge-taken count.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
size_type size() const
Definition SmallPtrSet.h:99
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
static StackOffset getScalable(int64_t Scalable)
Definition TypeSize.h:40
static StackOffset getFixed(int64_t Fixed)
Definition TypeSize.h:39
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
Class to represent struct types.
TargetInstrInfo - Interface to description of machine instruction set.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
const RTLIB::RuntimeLibcallsInfo & getRuntimeLibcallsInfo() const
Primary interface to the complete machine description for the target machine.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
virtual const DataLayout & getDataLayout() const
virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) const
virtual bool isLoweredToCall(const Function *F) const
virtual bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const
bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance) const
virtual bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind) const override
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
PopcntSupportKind
Flags indicating the kind of support for population count.
@ TCC_Free
Expected to fold away in lowering.
@ TCC_Basic
The cost of a typical 'add' instruction.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
Definition TypeSize.h:346
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:297
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition Type.h:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition Type.h:142
LLVM_ABI Type * getWithNewType(Type *EltTy) const
Given vector type, change the element type, whilst keeping the old number of elements.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition Type.h:156
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:284
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
const Use & getOperandUse(unsigned i) const
Definition User.h:220
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
user_iterator user_begin()
Definition Value.h:402
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition Value.cpp:962
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static VectorType * getInteger(VectorType *VTy)
This static method gets a VectorType with the same number of elements as the input type,...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
LLVM_ABI APInt getCpuSupportsMask(ArrayRef< StringRef > Features)
static constexpr unsigned SVEBitsPerBlock
LLVM_ABI APInt getFMVPriority(ArrayRef< StringRef > Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
Definition ISDOpcodes.h:24
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:880
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:993
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:765
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:978
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:926
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:959
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
OneOps_match< OpTy, Instruction::Load > m_Load(const OpTy &Op)
Matches LoadInst.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
std::optional< unsigned > isDUPQMask(ArrayRef< int > Mask, unsigned Segments, unsigned SegmentSize)
isDUPQMask - matches a splat of equivalent lanes within segments of a given number of elements.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
const CostTblEntryT< CostType > * CostTableLookup(ArrayRef< CostTblEntryT< CostType > > Tbl, int ISD, MVT Ty)
Find in cost table.
Definition CostTable.h:35
LLVM_ABI bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name)
Returns true if Name is applied to TheLoop and enabled.
bool isZIPMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut, unsigned &OperandOrderOut)
Return true for zip1 or zip2 masks of the form: <0, 8, 1, 9, 2, 10, 3, 11> (WhichResultOut = 0,...
TailFoldingOpts
An enum to describe what types of loops we should attempt to tail-fold: Disabled: None Reductions: Lo...
InstructionCost Cost
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
bool isDUPFirstSegmentMask(ArrayRef< int > Mask, unsigned Segments, unsigned SegmentSize)
isDUPFirstSegmentMask - matches a splat of the first 128b segment.
TypeConversionCostTblEntryT< unsigned > TypeConversionCostTblEntry
Definition CostTable.h:61
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Uninitialized
Definition Threading.h:60
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
LLVM_ABI std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2173
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
unsigned M1(unsigned Val)
Definition VE.h:377
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned getPerfectShuffleCost(llvm::ArrayRef< int > M)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isUZPMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut)
Return true for uzp1 or uzp2 masks of the form: <0, 2, 4, 6, 8, 10, 12, 14> or <1,...
bool isREVMask(ArrayRef< int > M, unsigned EltSize, unsigned NumElts, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ AnyOf
AnyOf reduction with select(cmp(),x,y) where one of (x,y) is loop invariant, and both x and y are integer type.
@ Xor
Bitwise or logical XOR of integers.
@ FindLast
FindLast reduction with select(cmp(),x,y) where x and y.
@ FMax
FP max implemented in terms of select(cmp()).
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ FMul
Product of floats.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ FMin
FP min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ AddChainWithSubs
A chain of adds and subs.
@ FAdd
Sum of floats.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
DWARFExpression::Operation Op
CostTblEntryT< unsigned > CostTblEntry
Definition CostTable.h:30
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
unsigned getNumElementsFromSVEPredPattern(unsigned Pattern)
Return the number of active elements for VL1 to VL256 predicate pattern, zero for all other patterns.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2166
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DominatorTree &DT, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
const TypeConversionCostTblEntryT< CostType > * ConvertCostTableLookup(ArrayRef< TypeConversionCostTblEntryT< CostType > > Tbl, int ISD, MVT Dst, MVT Src)
Find in type conversion cost table.
Definition CostTable.h:66
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
bool isTRNMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut, unsigned &OperandOrderOut)
Return true for trn1 or trn2 masks of the form: <0, 8, 2, 10, 4, 12, 6, 14> (WhichResultOut = 0) or <1, 9, 3, 11, 5, 13, 7, 15> (WhichResultOut = 1).
#define N
static SVEIntrinsicInfo defaultMergingUnaryNarrowingTopOp()
static SVEIntrinsicInfo defaultZeroingOp()
SVEIntrinsicInfo & setOperandIdxInactiveLanesTakenFrom(unsigned Index)
static SVEIntrinsicInfo defaultMergingOp(Intrinsic::ID IID=Intrinsic::not_intrinsic)
SVEIntrinsicInfo & setOperandIdxWithNoActiveLanes(unsigned Index)
unsigned getOperandIdxWithNoActiveLanes() const
SVEIntrinsicInfo & setInactiveLanesAreUnused()
SVEIntrinsicInfo & setInactiveLanesAreNotDefined()
SVEIntrinsicInfo & setGoverningPredicateOperandIdx(unsigned Index)
static SVEIntrinsicInfo defaultUndefOp()
Intrinsic::ID getMatchingUndefIntrinsic() const
SVEIntrinsicInfo & setResultIsZeroInitialized()
static SVEIntrinsicInfo defaultMergingUnaryOp()
SVEIntrinsicInfo & setMatchingUndefIntrinsic(Intrinsic::ID IID)
unsigned getGoverningPredicateOperandIdx() const
SVEIntrinsicInfo & setMatchingIROpcode(unsigned Opcode)
unsigned getOperandIdxInactiveLanesTakenFrom() const
static SVEIntrinsicInfo defaultVoidOp(unsigned GPIndex)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:74
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:284
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
Definition ValueTypes.h:359
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
bool isFixedLengthVector() const
Definition ValueTypes.h:181
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:323
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:174
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:328
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:336
Summarize the scheduling resources required for an instruction of a particular scheduling class.
Definition MCSchedule.h:123
bool isVariant() const
Definition MCSchedule.h:144
Machine model for scheduling, bundling, and heuristics.
Definition MCSchedule.h:258
static LLVM_ABI double getReciprocalThroughput(const MCSubtargetInfo &STI, const MCSchedClassDesc &SCDesc)
Matching combinators.
Information about a load/store intrinsic defined by the target.
InterleavedAccessInfo * IAI
LoopVectorizationLegality * LVL
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
unsigned Insns
TODO: Some of these could be merged.
Returns options for expansion of memcmp. IsZeroCmp is true if this is the expansion of memcmp(p1, p2, s) == 0.
Parameters that control the generic loop unrolling transformation.
bool UpperBound
Allow using trip count upper bound to unroll loops.
bool Force
Apply loop unroll on any kind of loop (mainly to loops that fail runtime unrolling).
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold, but used for partial/runtime unrolling (set to UINT_MAX to disable).
unsigned DefaultUnrollRuntimeCount
Default unroll count for loops with run-time trip count.
bool RuntimeUnrollMultiExit
Allow runtime unrolling multi-exit loops.
unsigned SCEVExpansionBudget
Don't allow runtime unrolling if expanding the trip count takes more than SCEVExpansionBudget.
bool AddAdditionalAccumulators
Allow unrolling to add parallel reduction phis.
unsigned UnrollAndJamInnerLoopThreshold
Threshold for unroll and jam, for inner loop size.
bool UnrollAndJam
Allow unroll and jam. Used to enable unroll and jam for the target.
bool UnrollRemainder
Allow unrolling of all the iterations of the runtime loop remainder.
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set to UINT_MAX to disable).
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number of loop iterations is not known at compile time).
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminate compare and branch overhead).