LLVM 23.0.0git
SystemZTargetTransformInfo.cpp
Go to the documentation of this file.
1//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements a TargetTransformInfo analysis pass specific to the
10// SystemZ target machine. It uses the target's detailed information to provide
11// more precise answers to certain TTI queries, while letting the target
12// independent and default TTI implementations handle the rest.
13//
14//===----------------------------------------------------------------------===//
15
23#include "llvm/IR/Intrinsics.h"
24#include "llvm/Support/Debug.h"
27
28using namespace llvm;
29
30#define DEBUG_TYPE "systemztti"
31
32//===----------------------------------------------------------------------===//
33//
34// SystemZ cost model.
35//
36//===----------------------------------------------------------------------===//
37
38static bool isUsedAsMemCpySource(const Value *V, bool &OtherUse) {
39 bool UsedAsMemCpySource = false;
40 for (const User *U : V->users())
41 if (const Instruction *User = dyn_cast<Instruction>(U)) {
43 UsedAsMemCpySource |= isUsedAsMemCpySource(User, OtherUse);
44 continue;
45 }
46 if (const MemCpyInst *Memcpy = dyn_cast<MemCpyInst>(User)) {
47 if (Memcpy->getOperand(1) == V && !Memcpy->isVolatile()) {
48 UsedAsMemCpySource = true;
49 continue;
50 }
51 }
52 OtherUse = true;
53 }
54 return UsedAsMemCpySource;
55}
56
57static void countNumMemAccesses(const Value *Ptr, unsigned &NumStores,
58 unsigned &NumLoads, const Function *F) {
59 if (!isa<PointerType>(Ptr->getType()))
60 return;
61 for (const User *U : Ptr->users())
62 if (const Instruction *User = dyn_cast<Instruction>(U)) {
63 if (User->getParent()->getParent() == F) {
64 if (const auto *SI = dyn_cast<StoreInst>(User)) {
65 if (SI->getPointerOperand() == Ptr && !SI->isVolatile())
66 NumStores++;
67 } else if (const auto *LI = dyn_cast<LoadInst>(User)) {
68 if (LI->getPointerOperand() == Ptr && !LI->isVolatile())
69 NumLoads++;
70 } else if (const auto *GEP = dyn_cast<GetElementPtrInst>(User)) {
71 if (GEP->getPointerOperand() == Ptr)
72 countNumMemAccesses(GEP, NumStores, NumLoads, F);
73 }
74 }
75 }
76}
77
79 unsigned Bonus = 0;
80 const Function *Caller = CB->getParent()->getParent();
81 const Function *Callee = CB->getCalledFunction();
82 if (!Callee)
83 return 0;
84
85 // Increase the threshold if an incoming argument is used only as a memcpy
86 // source.
87 for (const Argument &Arg : Callee->args()) {
88 bool OtherUse = false;
89 if (isUsedAsMemCpySource(&Arg, OtherUse) && !OtherUse) {
90 Bonus = 1000;
91 break;
92 }
93 }
94
95 // Give bonus for globals used much in both caller and a relatively small
96 // callee.
97 unsigned InstrCount = 0;
99 for (auto &I : instructions(Callee)) {
100 if (++InstrCount == 200) {
101 Ptr2NumUses.clear();
102 break;
103 }
104 if (const auto *SI = dyn_cast<StoreInst>(&I)) {
105 if (!SI->isVolatile())
106 if (auto *GV = dyn_cast<GlobalVariable>(SI->getPointerOperand()))
107 Ptr2NumUses[GV]++;
108 } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
109 if (!LI->isVolatile())
110 if (auto *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand()))
111 Ptr2NumUses[GV]++;
112 } else if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
113 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand())) {
114 unsigned NumStores = 0, NumLoads = 0;
115 countNumMemAccesses(GEP, NumStores, NumLoads, Callee);
116 Ptr2NumUses[GV] += NumLoads + NumStores;
117 }
118 }
119 }
120
121 for (auto [Ptr, NumCalleeUses] : Ptr2NumUses)
122 if (NumCalleeUses > 10) {
123 unsigned CallerStores = 0, CallerLoads = 0;
124 countNumMemAccesses(Ptr, CallerStores, CallerLoads, Caller);
125 if (CallerStores + CallerLoads > 10) {
126 Bonus = 1000;
127 break;
128 }
129 }
130
131 // Give bonus when Callee accesses an Alloca of Caller heavily.
132 unsigned NumStores = 0;
133 unsigned NumLoads = 0;
134 for (unsigned OpIdx = 0; OpIdx != Callee->arg_size(); ++OpIdx) {
135 Value *CallerArg = CB->getArgOperand(OpIdx);
136 Argument *CalleeArg = Callee->getArg(OpIdx);
137 if (isa<AllocaInst>(CallerArg))
138 countNumMemAccesses(CalleeArg, NumStores, NumLoads, Callee);
139 }
140 if (NumLoads > 10)
141 Bonus += NumLoads * 50;
142 if (NumStores > 10)
143 Bonus += NumStores * 50;
144 Bonus = std::min(Bonus, unsigned(1000));
145
146 LLVM_DEBUG(if (Bonus)
147 dbgs() << "++ SZTTI Adding inlining bonus: " << Bonus << "\n";);
148 return Bonus;
149}
150
154 assert(Ty->isIntegerTy());
155
156 unsigned BitSize = Ty->getPrimitiveSizeInBits();
157 // There is no cost model for constants with a bit size of 0. Return TCC_Free
158 // here, so that constant hoisting will ignore this constant.
159 if (BitSize == 0)
160 return TTI::TCC_Free;
161 // No cost model for operations on integers larger than 128 bit implemented yet.
162 if ((!ST->hasVector() && BitSize > 64) || BitSize > 128)
163 return TTI::TCC_Free;
164
165 if (Imm == 0)
166 return TTI::TCC_Free;
167
168 if (Imm.getBitWidth() <= 64) {
169 // Constants loaded via lgfi.
170 if (isInt<32>(Imm.getSExtValue()))
171 return TTI::TCC_Basic;
172 // Constants loaded via llilf.
173 if (isUInt<32>(Imm.getZExtValue()))
174 return TTI::TCC_Basic;
175 // Constants loaded via llihf:
176 if ((Imm.getZExtValue() & 0xffffffff) == 0)
177 return TTI::TCC_Basic;
178
179 return 2 * TTI::TCC_Basic;
180 }
181
182 // i128 immediates loads from Constant Pool
183 return 2 * TTI::TCC_Basic;
184}
185
187 const APInt &Imm, Type *Ty,
189 Instruction *Inst) const {
190 assert(Ty->isIntegerTy());
191
192 unsigned BitSize = Ty->getPrimitiveSizeInBits();
193 // There is no cost model for constants with a bit size of 0. Return TCC_Free
194 // here, so that constant hoisting will ignore this constant.
195 if (BitSize == 0)
196 return TTI::TCC_Free;
197 // No cost model for operations on integers larger than 64 bit implemented yet.
198 if (BitSize > 64)
199 return TTI::TCC_Free;
200
201 switch (Opcode) {
202 default:
203 return TTI::TCC_Free;
204 case Instruction::GetElementPtr:
205 // Always hoist the base address of a GetElementPtr. This prevents the
206 // creation of new constants for every base constant that gets constant
207 // folded with the offset.
208 if (Idx == 0)
209 return 2 * TTI::TCC_Basic;
210 return TTI::TCC_Free;
211 case Instruction::Store:
212 if (Idx == 0 && Imm.getBitWidth() <= 64) {
213 // Any 8-bit immediate store can by implemented via mvi.
214 if (BitSize == 8)
215 return TTI::TCC_Free;
216 // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
217 if (isInt<16>(Imm.getSExtValue()))
218 return TTI::TCC_Free;
219 }
220 break;
221 case Instruction::ICmp:
222 if (Idx == 1 && Imm.getBitWidth() <= 64) {
223 // Comparisons against signed 32-bit immediates implemented via cgfi.
224 if (isInt<32>(Imm.getSExtValue()))
225 return TTI::TCC_Free;
226 // Comparisons against unsigned 32-bit immediates implemented via clgfi.
227 if (isUInt<32>(Imm.getZExtValue()))
228 return TTI::TCC_Free;
229 }
230 break;
231 case Instruction::Add:
232 case Instruction::Sub:
233 if (Idx == 1 && Imm.getBitWidth() <= 64) {
234 // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
235 if (isUInt<32>(Imm.getZExtValue()))
236 return TTI::TCC_Free;
237 // Or their negation, by swapping addition vs. subtraction.
238 if (isUInt<32>(-Imm.getSExtValue()))
239 return TTI::TCC_Free;
240 }
241 break;
242 case Instruction::Mul:
243 if (Idx == 1 && Imm.getBitWidth() <= 64) {
244 // We use msgfi to multiply by 32-bit signed immediates.
245 if (isInt<32>(Imm.getSExtValue()))
246 return TTI::TCC_Free;
247 }
248 break;
249 case Instruction::Or:
250 case Instruction::Xor:
251 if (Idx == 1 && Imm.getBitWidth() <= 64) {
252 // Masks supported by oilf/xilf.
253 if (isUInt<32>(Imm.getZExtValue()))
254 return TTI::TCC_Free;
255 // Masks supported by oihf/xihf.
256 if ((Imm.getZExtValue() & 0xffffffff) == 0)
257 return TTI::TCC_Free;
258 }
259 break;
260 case Instruction::And:
261 if (Idx == 1 && Imm.getBitWidth() <= 64) {
262 // Any 32-bit AND operation can by implemented via nilf.
263 if (BitSize <= 32)
264 return TTI::TCC_Free;
265 // 64-bit masks supported by nilf.
266 if (isUInt<32>(~Imm.getZExtValue()))
267 return TTI::TCC_Free;
268 // 64-bit masks supported by nilh.
269 if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
270 return TTI::TCC_Free;
271 // Some 64-bit AND operations can be implemented via risbg.
272 const SystemZInstrInfo *TII = ST->getInstrInfo();
273 unsigned Start, End;
274 if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
275 return TTI::TCC_Free;
276 }
277 break;
278 case Instruction::Shl:
279 case Instruction::LShr:
280 case Instruction::AShr:
281 // Always return TCC_Free for the shift value of a shift instruction.
282 if (Idx == 1)
283 return TTI::TCC_Free;
284 break;
285 case Instruction::UDiv:
286 case Instruction::SDiv:
287 case Instruction::URem:
288 case Instruction::SRem:
289 case Instruction::Trunc:
290 case Instruction::ZExt:
291 case Instruction::SExt:
292 case Instruction::IntToPtr:
293 case Instruction::PtrToInt:
294 case Instruction::BitCast:
295 case Instruction::PHI:
296 case Instruction::Call:
297 case Instruction::Select:
298 case Instruction::Ret:
299 case Instruction::Load:
300 break;
301 }
302
304}
305
308 const APInt &Imm, Type *Ty,
310 assert(Ty->isIntegerTy());
311
312 unsigned BitSize = Ty->getPrimitiveSizeInBits();
313 // There is no cost model for constants with a bit size of 0. Return TCC_Free
314 // here, so that constant hoisting will ignore this constant.
315 if (BitSize == 0)
316 return TTI::TCC_Free;
317 // No cost model for operations on integers larger than 64 bit implemented yet.
318 if (BitSize > 64)
319 return TTI::TCC_Free;
320
321 switch (IID) {
322 default:
323 return TTI::TCC_Free;
324 case Intrinsic::sadd_with_overflow:
325 case Intrinsic::uadd_with_overflow:
326 case Intrinsic::ssub_with_overflow:
327 case Intrinsic::usub_with_overflow:
328 // These get expanded to include a normal addition/subtraction.
329 if (Idx == 1 && Imm.getBitWidth() <= 64) {
330 if (isUInt<32>(Imm.getZExtValue()))
331 return TTI::TCC_Free;
332 if (isUInt<32>(-Imm.getSExtValue()))
333 return TTI::TCC_Free;
334 }
335 break;
336 case Intrinsic::smul_with_overflow:
337 case Intrinsic::umul_with_overflow:
338 // These get expanded to include a normal multiplication.
339 if (Idx == 1 && Imm.getBitWidth() <= 64) {
340 if (isInt<32>(Imm.getSExtValue()))
341 return TTI::TCC_Free;
342 }
343 break;
344 case Intrinsic::experimental_stackmap:
345 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
346 return TTI::TCC_Free;
347 break;
348 case Intrinsic::experimental_patchpoint_void:
349 case Intrinsic::experimental_patchpoint:
350 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
351 return TTI::TCC_Free;
352 break;
353 }
355}
356
358SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) const {
359 assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
360 if (ST->hasPopulationCount() && TyWidth <= 64)
362 return TTI::PSK_Software;
363}
364
367 OptimizationRemarkEmitter *ORE) const {
368 // Find out if L contains a call, what the machine instruction count
369 // estimate is, and how many stores there are.
370 bool HasCall = false;
371 InstructionCost NumStores = 0;
372 for (auto &BB : L->blocks())
373 for (auto &I : *BB) {
374 if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
375 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
376 if (isLoweredToCall(F))
377 HasCall = true;
378 if (F->getIntrinsicID() == Intrinsic::memcpy ||
379 F->getIntrinsicID() == Intrinsic::memset)
380 NumStores++;
381 } else { // indirect call.
382 HasCall = true;
383 }
384 }
385 if (isa<StoreInst>(&I)) {
386 Type *MemAccessTy = I.getOperand(0)->getType();
387 NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, Align(),
389 }
390 }
391
392 // The z13 processor will run out of store tags if too many stores
393 // are fed into it too quickly. Therefore make sure there are not
394 // too many stores in the resulting unrolled loop.
395 unsigned const NumStoresVal = NumStores.getValue();
396 unsigned const Max = (NumStoresVal ? (12 / NumStoresVal) : UINT_MAX);
397
398 if (HasCall) {
399 // Only allow full unrolling if loop has any calls.
400 UP.FullUnrollMaxCount = Max;
401 UP.MaxCount = 1;
402 return;
403 }
404
405 UP.MaxCount = Max;
406 if (UP.MaxCount <= 1)
407 return;
408
409 // Allow partial and runtime trip count unrolling.
410 UP.Partial = UP.Runtime = true;
411
412 UP.PartialThreshold = 75;
414
415 // Allow expensive instructions in the pre-header of the loop.
416 UP.AllowExpensiveTripCount = true;
417
418 UP.Force = true;
419}
420
425
428 const TargetTransformInfo::LSRCost &C2) const {
429 // SystemZ specific: check instruction count (first), and don't care about
430 // ImmCost, since offsets are checked explicitly.
431 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
432 C1.NumIVMuls, C1.NumBaseAdds,
433 C1.ScaleCost, C1.SetupCost) <
434 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
435 C2.NumIVMuls, C2.NumBaseAdds,
436 C2.ScaleCost, C2.SetupCost);
437}
438
439unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
440 bool Vector = (ClassID == 1);
441 if (!Vector)
442 // Discount the stack pointer. Also leave out %r0, since it can't
443 // be used in an address.
444 return 14;
445 if (ST->hasVector())
446 return 32;
447 return 0;
448}
449
452 switch (K) {
454 return TypeSize::getFixed(64);
456 return TypeSize::getFixed(ST->hasVector() ? 128 : 0);
458 return TypeSize::getScalable(0);
459 }
460
461 llvm_unreachable("Unsupported register kind");
462}
463
464unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
465 unsigned NumStridedMemAccesses,
466 unsigned NumPrefetches,
467 bool HasCall) const {
468 // Don't prefetch a loop with many far apart accesses.
469 if (NumPrefetches > 16)
470 return UINT_MAX;
471
472 // Emit prefetch instructions for smaller strides in cases where we think
473 // the hardware prefetcher might not be able to keep up.
474 if (NumStridedMemAccesses > 32 && !HasCall &&
475 (NumMemAccesses - NumStridedMemAccesses) * 32 <= NumStridedMemAccesses)
476 return 1;
477
478 return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
479}
480
481bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) const {
482 EVT VT = TLI->getValueType(DL, DataType);
483 return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
484}
485
486static bool isFreeEltLoad(const Value *Op) {
487 if (isa<LoadInst>(Op) && Op->hasOneUse()) {
488 const Instruction *UserI = cast<Instruction>(*Op->user_begin());
489 return !isa<StoreInst>(UserI); // Prefer MVC
490 }
491 return false;
492}
493
495 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
496 TTI::TargetCostKind CostKind, bool ForPoisonSrc, ArrayRef<Value *> VL,
497 TTI::VectorInstrContext VIC) const {
498 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
500
501 if (Insert && Ty->isIntOrIntVectorTy(64)) {
502 // VLVGP will insert two GPRs with one instruction, while VLE will load
503 // an element directly with no extra cost
504 assert((VL.empty() || VL.size() == NumElts) &&
505 "Type does not match the number of values.");
506 InstructionCost CurrVectorCost = 0;
507 for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
508 if (DemandedElts[Idx] && !(VL.size() && isFreeEltLoad(VL[Idx])))
509 ++CurrVectorCost;
510 if (Idx % 2 == 1) {
511 Cost += std::min(InstructionCost(1), CurrVectorCost);
512 CurrVectorCost = 0;
513 }
514 }
515 Insert = false;
516 }
517
518 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
519 CostKind, ForPoisonSrc, VL);
520 return Cost;
521}
522
523// Return the bit size for the scalar type or vector element
524// type. getScalarSizeInBits() returns 0 for a pointer type.
525static unsigned getScalarSizeInBits(Type *Ty) {
526 unsigned Size =
527 (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
528 assert(Size > 0 && "Element must have non-zero size.");
529 return Size;
530}
531
532// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
533// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
534// 3.
535static unsigned getNumVectorRegs(Type *Ty) {
536 auto *VTy = cast<FixedVectorType>(Ty);
537 unsigned WideBits = getScalarSizeInBits(Ty) * VTy->getNumElements();
538 assert(WideBits > 0 && "Could not compute size of vector");
539 return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
540}
541
542static bool isFoldableRMW(const Instruction *I, Type *Ty) {
544 if (!BI || !BI->hasOneUse())
545 return false;
546
547 unsigned Opcode = BI->getOpcode();
548 unsigned BitWidth = Ty->getScalarSizeInBits();
549
550 switch (Opcode) {
551 case Instruction::And:
552 case Instruction::Or:
553 case Instruction::Xor:
554 if (BitWidth != 8)
555 return false;
556 break;
557 case Instruction::Add:
558 case Instruction::Sub:
559 if (BitWidth != 32 && BitWidth != 64)
560 return false;
561 break;
562 default:
563 return false;
564 }
565
566 Value *Op0 = BI->getOperand(0), *Op1 = BI->getOperand(1);
567 if (!isa<ConstantInt>(Op0) && !isa<ConstantInt>(Op1))
568 return false;
569
570 Value *V =
571 (Opcode == Instruction::Sub) ? Op0 : (isa<ConstantInt>(Op0) ? Op1 : Op0);
572 if (Opcode == Instruction::Sub && !isa<ConstantInt>(Op1))
573 return false;
574
575 auto *LI = dyn_cast_or_null<LoadInst>(V);
576 // Already checked BI hasOneUse.
577 auto *SI = dyn_cast<StoreInst>(BI->user_back());
578
579 return LI && SI && !LI->isVolatile() && !SI->isVolatile() &&
580 LI->hasOneUse() && LI->getPointerOperand() == SI->getPointerOperand();
581}
582
584 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
586 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
587
588 // TODO: Handle more cost kinds.
590 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
591 Op2Info, Args, CxtI);
592 if (CxtI && Ty && !Ty->isVectorTy() && isFoldableRMW(CxtI, Ty))
593 return TTI::TCC_Free;
594 // TODO: return a good value for BB-VECTORIZER that includes the
595 // immediate loads, which we do not want to count for the loop
596 // vectorizer, since they are hopefully hoisted out of the loop. This
597 // would require a new parameter 'InLoop', but not sure if constant
598 // args are common enough to motivate this.
599
600 unsigned ScalarBits = Ty->getScalarSizeInBits();
601
602 // There are thre cases of division and remainder: Dividing with a register
603 // needs a divide instruction. A divisor which is a power of two constant
604 // can be implemented with a sequence of shifts. Any other constant needs a
605 // multiply and shifts.
606 const unsigned DivInstrCost = 20;
607 const unsigned DivMulSeqCost = 10;
608 const unsigned SDivPow2Cost = 4;
609
610 bool SignedDivRem =
611 Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
612 bool UnsignedDivRem =
613 Opcode == Instruction::UDiv || Opcode == Instruction::URem;
614
615 // Check for a constant divisor.
616 bool DivRemConst = false;
617 bool DivRemConstPow2 = false;
618 if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
619 if (const Constant *C = dyn_cast<Constant>(Args[1])) {
620 const ConstantInt *CVal =
621 (C->getType()->isVectorTy()
622 ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
624 if (CVal && (CVal->getValue().isPowerOf2() ||
625 CVal->getValue().isNegatedPowerOf2()))
626 DivRemConstPow2 = true;
627 else
628 DivRemConst = true;
629 }
630 }
631
632 if (!Ty->isVectorTy()) {
633 // These FP operations are supported with a dedicated instruction for
634 // float, double and fp128 (base implementation assumes float generally
635 // costs 2).
636 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
637 Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
638 return 1;
639
640 // There is no native support for FRem.
641 if (Opcode == Instruction::FRem)
642 return LIBCALL_COST;
643
644 // Give discount for some combined logical operations if supported.
645 if (Args.size() == 2) {
646 if (Opcode == Instruction::Xor) {
647 for (const Value *A : Args) {
648 if (const Instruction *I = dyn_cast<Instruction>(A))
649 if (I->hasOneUse() &&
650 (I->getOpcode() == Instruction::Or ||
651 I->getOpcode() == Instruction::And ||
652 I->getOpcode() == Instruction::Xor))
653 if ((ScalarBits <= 64 && ST->hasMiscellaneousExtensions3()) ||
654 (isInt128InVR(Ty) &&
655 (I->getOpcode() == Instruction::Or || ST->hasVectorEnhancements1())))
656 return 0;
657 }
658 }
659 else if (Opcode == Instruction::And || Opcode == Instruction::Or) {
660 for (const Value *A : Args) {
661 if (const Instruction *I = dyn_cast<Instruction>(A))
662 if ((I->hasOneUse() && I->getOpcode() == Instruction::Xor) &&
663 ((ScalarBits <= 64 && ST->hasMiscellaneousExtensions3()) ||
664 (isInt128InVR(Ty) &&
665 (Opcode == Instruction::And || ST->hasVectorEnhancements1()))))
666 return 0;
667 }
668 }
669 }
670
671 // Or requires one instruction, although it has custom handling for i64.
672 if (Opcode == Instruction::Or)
673 return 1;
674
675 if (Opcode == Instruction::Xor && ScalarBits == 1) {
676 if (ST->hasLoadStoreOnCond2())
677 return 5; // 2 * (li 0; loc 1); xor
678 return 7; // 2 * ipm sequences ; xor ; shift ; compare
679 }
680
681 if (DivRemConstPow2)
682 return (SignedDivRem ? SDivPow2Cost : 1);
683 if (DivRemConst)
684 return DivMulSeqCost;
685 if (SignedDivRem || UnsignedDivRem)
686 return DivInstrCost;
687 }
688 else if (ST->hasVector()) {
689 auto *VTy = cast<FixedVectorType>(Ty);
690 unsigned VF = VTy->getNumElements();
691 unsigned NumVectors = getNumVectorRegs(Ty);
692
693 // These vector operations are custom handled, but are still supported
694 // with one instruction per vector, regardless of element size.
695 if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
696 Opcode == Instruction::AShr) {
697 return NumVectors;
698 }
699
700 if (DivRemConstPow2)
701 return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
702 if (DivRemConst) {
703 SmallVector<Type *> Tys(Args.size(), Ty);
704 return VF * DivMulSeqCost +
706 }
707 if (SignedDivRem || UnsignedDivRem) {
708 if (ST->hasVectorEnhancements3() && ScalarBits >= 32)
709 return NumVectors * DivInstrCost;
710 else if (VF > 4)
711 // Temporary hack: disable high vectorization factors with integer
712 // division/remainder, which will get scalarized and handled with
713 // GR128 registers. The mischeduler is not clever enough to avoid
714 // spilling yet.
715 return 1000;
716 }
717
718 // These FP operations are supported with a single vector instruction for
719 // double (base implementation assumes float generally costs 2). For
720 // FP128, the scalar cost is 1, and there is no overhead since the values
721 // are already in scalar registers.
722 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
723 Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
724 switch (ScalarBits) {
725 case 32: {
726 // The vector enhancements facility 1 provides v4f32 instructions.
727 if (ST->hasVectorEnhancements1())
728 return NumVectors;
729 // Return the cost of multiple scalar invocation plus the cost of
730 // inserting and extracting the values.
731 InstructionCost ScalarCost =
732 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
733 SmallVector<Type *> Tys(Args.size(), Ty);
735 (VF * ScalarCost) +
737 // FIXME: VF 2 for these FP operations are currently just as
738 // expensive as for VF 4.
739 if (VF == 2)
740 Cost *= 2;
741 return Cost;
742 }
743 case 64:
744 case 128:
745 return NumVectors;
746 default:
747 break;
748 }
749 }
750
751 // There is no native support for FRem.
752 if (Opcode == Instruction::FRem) {
753 SmallVector<Type *> Tys(Args.size(), Ty);
755 (VF * LIBCALL_COST) +
757 // FIXME: VF 2 for float is currently just as expensive as for VF 4.
758 if (VF == 2 && ScalarBits == 32)
759 Cost *= 2;
760 return Cost;
761 }
762 }
763
764 // Fallback to the default implementation.
765 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
766 Args, CxtI);
767}
768
771 VectorType *SrcTy, ArrayRef<int> Mask,
772 TTI::TargetCostKind CostKind, int Index,
774 const Instruction *CxtI) const {
775 Kind = improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp);
776 if (ST->hasVector()) {
777 unsigned NumVectors = getNumVectorRegs(SrcTy);
778
779 // TODO: Since fp32 is expanded, the shuffle cost should always be 0.
780
781 // FP128 values are always in scalar registers, so there is no work
782 // involved with a shuffle, except for broadcast. In that case register
783 // moves are done with a single instruction per element.
784 if (SrcTy->getScalarType()->isFP128Ty())
785 return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);
786
787 switch (Kind) {
789 // ExtractSubvector Index indicates start offset.
790
791 // Extracting a subvector from first index is a noop.
792 return (Index == 0 ? 0 : NumVectors);
793
795 // Loop vectorizer calls here to figure out the extra cost of
796 // broadcasting a loaded value to all elements of a vector. Since vlrep
797 // loads and replicates with a single instruction, adjust the returned
798 // value.
799 return NumVectors - 1;
800
801 default:
802
803 // SystemZ supports single instruction permutation / replication.
804 return NumVectors;
805 }
806 }
807
808 return BaseT::getShuffleCost(Kind, DstTy, SrcTy, Mask, CostKind, Index,
809 SubTp);
810}
811
812// Return the log2 difference of the element sizes of the two vector types.
813static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
814 unsigned Bits0 = Ty0->getScalarSizeInBits();
815 unsigned Bits1 = Ty1->getScalarSizeInBits();
816
817 if (Bits1 > Bits0)
818 return (Log2_32(Bits1) - Log2_32(Bits0));
819
820 return (Log2_32(Bits0) - Log2_32(Bits1));
821}
822
823// Return the number of instructions needed to truncate SrcTy to DstTy.
824unsigned SystemZTTIImpl::getVectorTruncCost(Type *SrcTy, Type *DstTy) const {
825 assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
826 assert(SrcTy->getPrimitiveSizeInBits().getFixedValue() >
828 "Packing must reduce size of vector type.");
831 "Packing should not change number of elements.");
832
833 // TODO: Since fp32 is expanded, the extract cost should always be 0.
834
835 unsigned NumParts = getNumVectorRegs(SrcTy);
836 if (NumParts <= 2)
837 // Up to 2 vector registers can be truncated efficiently with pack or
838 // permute. The latter requires an immediate mask to be loaded, which
839 // typically gets hoisted out of a loop. TODO: return a good value for
840 // BB-VECTORIZER that includes the immediate loads, which we do not want
841 // to count for the loop vectorizer.
842 return 1;
843
844 unsigned Cost = 0;
845 unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
846 unsigned VF = cast<FixedVectorType>(SrcTy)->getNumElements();
847 for (unsigned P = 0; P < Log2Diff; ++P) {
848 if (NumParts > 1)
849 NumParts /= 2;
850 Cost += NumParts;
851 }
852
853 // Currently, a general mix of permutes and pack instructions is output by
854 // isel, which follow the cost computation above except for this case which
855 // is one instruction less:
856 if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
857 DstTy->getScalarSizeInBits() == 8)
858 Cost--;
859
860 return Cost;
861}
862
863// Return the cost of converting a vector bitmask produced by a compare
864// (SrcTy), to the type of the select or extend instruction (DstTy).
866 Type *DstTy) const {
867 assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
868 "Should only be called with vector types.");
869
870 unsigned PackCost = 0;
871 unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
872 unsigned DstScalarBits = DstTy->getScalarSizeInBits();
873 unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
874 if (SrcScalarBits > DstScalarBits)
875 // The bitmask will be truncated.
876 PackCost = getVectorTruncCost(SrcTy, DstTy);
877 else if (SrcScalarBits < DstScalarBits) {
878 unsigned DstNumParts = getNumVectorRegs(DstTy);
879 // Each vector select needs its part of the bitmask unpacked.
880 PackCost = Log2Diff * DstNumParts;
881 // Extra cost for moving part of mask before unpacking.
882 PackCost += DstNumParts - 1;
883 }
884
885 return PackCost;
886}
887
888// Return the type of the compared operands. This is needed to compute the
889// cost for a Select / ZExt or SExt instruction.
890static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
891 Type *OpTy = nullptr;
892 if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
893 OpTy = CI->getOperand(0)->getType();
894 else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
895 if (LogicI->getNumOperands() == 2)
896 if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
897 if (isa<CmpInst>(LogicI->getOperand(1)))
898 OpTy = CI0->getOperand(0)->getType();
899
900 if (OpTy != nullptr) {
901 if (VF == 1) {
902 assert (!OpTy->isVectorTy() && "Expected scalar type");
903 return OpTy;
904 }
905 // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
906 // be either scalar or already vectorized with a same or lesser VF.
907 Type *ElTy = OpTy->getScalarType();
908 return FixedVectorType::get(ElTy, VF);
909 }
910
911 return nullptr;
912}
913
914// Get the cost of converting a boolean vector to a vector with same width
915// and element size as Dst, plus the cost of zero extending if needed.
916unsigned
918 const Instruction *I) const {
919 auto *DstVTy = cast<FixedVectorType>(Dst);
920 unsigned VF = DstVTy->getNumElements();
921 unsigned Cost = 0;
922 // If we know what the widths of the compared operands, get any cost of
923 // converting it to match Dst. Otherwise assume same widths.
924 Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
925 if (CmpOpTy != nullptr)
926 Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
927 if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
928 // One 'vn' per dst vector with an immediate mask.
929 Cost += getNumVectorRegs(Dst);
930 return Cost;
931}
932
// NOTE(review): doxygen-rendered source — the opening signature line of
// SystemZTTIImpl::getCastInstrCost and the early cost-kind guard (source
// lines 933, 935-936, 939) are elided links in this dump; the visible code
// below is left byte-identical.
934 Type *Src,
937 const Instruction *I) const {
 // Non-throughput cost kinds: defer to the base implementation but clamp any
 // nonzero answer to 1 (cheap by default).
938 // FIXME: Can the logic below also be used for these cost kinds?
940 auto BaseCost = BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
941 return BaseCost == 0 ? BaseCost : 1;
942 }
943
944 unsigned DstScalarBits = Dst->getScalarSizeInBits();
945 unsigned SrcScalarBits = Src->getScalarSizeInBits();
946
 // Scalar source: handle scalar->scalar casts here; scalar->vector goes to
 // the base implementation.
947 if (!Src->isVectorTy()) {
948 if (Dst->isVectorTy())
949 return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
950
951 if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
952 if (Src->isIntegerTy(128))
953 return LIBCALL_COST;
954 if (SrcScalarBits >= 32 ||
955 (I != nullptr && isa<LoadInst>(I->getOperand(0))))
956 return 1;
957 return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
958 }
959
960 if ((Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) &&
961 Dst->isIntegerTy(128))
962 return LIBCALL_COST;
963
964 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt)) {
965 if (Src->isIntegerTy(1)) {
966 if (DstScalarBits == 128) {
967 if (Opcode == Instruction::SExt && ST->hasVectorEnhancements3())
968 return 0;/*VCEQQ*/
969 return 5 /*branch seq.*/;
970 }
971
972 if (ST->hasLoadStoreOnCond2())
973 return 2; // li 0; loc 1
974
975 // This should be extension of a compare i1 result, which is done with
976 // ipm and a varying sequence of instructions.
977 unsigned Cost = 0;
978 if (Opcode == Instruction::SExt)
979 Cost = (DstScalarBits < 64 ? 3 : 4);
980 if (Opcode == Instruction::ZExt)
981 Cost = 3;
982 Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
983 if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
984 // If operands of an fp-type was compared, this costs +1.
985 Cost++;
986 return Cost;
987 }
988 else if (isInt128InVR(Dst)) {
989 // Extensions from GPR to i128 (in VR) typically costs two instructions,
990 // but a zero-extending load would be just one extra instruction.
991 if (Opcode == Instruction::ZExt && I != nullptr)
992 if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
993 if (Ld->hasOneUse())
994 return 1;
995 return 2;
996 }
997 }
998
 // A trunc of i128 (kept in a vector register) can be free when it only
 // feeds truncating stores or comes from a single-use load.
999 if (Opcode == Instruction::Trunc && isInt128InVR(Src) && I != nullptr) {
1000 if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
1001 if (Ld->hasOneUse())
1002 return 0; // Will be converted to GPR load.
1003 bool OnlyTruncatingStores = true;
1004 for (const User *U : I->users())
1005 if (!isa<StoreInst>(U)) {
1006 OnlyTruncatingStores = false;
1007 break;
1008 }
1009 if (OnlyTruncatingStores)
1010 return 0;
1011 return 2; // Vector element extraction.
1012 }
1013 }
 // Vector source on a subtarget with vector instructions.
1014 else if (ST->hasVector()) {
1015 // Vector to scalar cast.
1016 auto *SrcVecTy = cast<FixedVectorType>(Src);
1017 auto *DstVecTy = dyn_cast<FixedVectorType>(Dst);
1018 if (!DstVecTy) {
1019 // TODO: tune vector-to-scalar cast.
1020 return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
1021 }
1022 unsigned VF = SrcVecTy->getNumElements();
1023 unsigned NumDstVectors = getNumVectorRegs(Dst);
1024 unsigned NumSrcVectors = getNumVectorRegs(Src);
1025
1026 if (Opcode == Instruction::Trunc) {
1027 if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
1028 return 0; // Check for NOOP conversions.
1029 return getVectorTruncCost(Src, Dst);
1030 }
1031
1032 if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
1033 if (SrcScalarBits >= 8) {
1034 // ZExt will use either a single unpack or a vector permute.
1035 if (Opcode == Instruction::ZExt)
1036 return NumDstVectors;
1037
1038 // SExt will be handled with one unpack per doubling of width.
1039 unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);
1040
1041 // For types that spans multiple vector registers, some additional
1042 // instructions are used to setup the unpacking.
1043 unsigned NumSrcVectorOps =
1044 (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
1045 : (NumDstVectors / 2));
1046
1047 return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
1048 }
1049 else if (SrcScalarBits == 1)
1050 return getBoolVecToIntConversionCost(Opcode, Dst, I);
1051 }
1052
1053 if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
1054 Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
1055 // TODO: Fix base implementation which could simplify things a bit here
1056 // (seems to miss on differentiating on scalar/vector types).
1057
1058 // Only 64 bit vector conversions are natively supported before z15.
1059 if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
1060 if (SrcScalarBits == DstScalarBits)
1061 return NumDstVectors;
1062
1063 if (SrcScalarBits == 1)
1064 return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
1065 }
1066
1067 // Return the cost of multiple scalar invocation plus the cost of
1068 // inserting and extracting the values. Base implementation does not
1069 // realize float->int gets scalarized.
1070 InstructionCost ScalarCost = getCastInstrCost(
1071 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind);
1072 InstructionCost TotCost = VF * ScalarCost;
1073 bool NeedsInserts = true, NeedsExtracts = true;
1074 // FP128 registers do not get inserted or extracted.
1075 if (DstScalarBits == 128 &&
1076 (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
1077 NeedsInserts = false;
1078 if (SrcScalarBits == 128 &&
1079 (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
1080 NeedsExtracts = false;
1081
1082 TotCost += BaseT::getScalarizationOverhead(SrcVecTy, /*Insert*/ false,
1083 NeedsExtracts, CostKind);
1084 TotCost += BaseT::getScalarizationOverhead(DstVecTy, NeedsInserts,
1085 /*Extract*/ false, CostKind);
1086
1087 // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
1088 if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
1089 TotCost *= 2;
1090
1091 return TotCost;
1092 }
1093
1094 if (Opcode == Instruction::FPTrunc) {
1095 if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
1096 return VF /*ldxbr/lexbr*/ +
1097 BaseT::getScalarizationOverhead(DstVecTy, /*Insert*/ true,
1098 /*Extract*/ false, CostKind);
1099 else // double -> float
1100 return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
1101 }
1102
1103 if (Opcode == Instruction::FPExt) {
1104 if (SrcScalarBits == 32 && DstScalarBits == 64) {
1105 // float -> double is very rare and currently unoptimized. Instead of
1106 // using vldeb, which can do two at a time, all conversions are
1107 // scalarized.
1108 return VF * 2;
1109 }
1110 // -> fp128. VF * lxdb/lxeb + extraction of elements.
1111 return VF + BaseT::getScalarizationOverhead(SrcVecTy, /*Insert*/ false,
1112 /*Extract*/ true, CostKind);
1113 }
1114 }
1115
 // Anything not handled above falls back to the generic cost model.
1116 return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
1117}
1118
1119// Scalar i8 / i16 operations will typically be made after first extending
1120// the operands to i32.
// Counts one extra instruction per operand that needs such an extension.
// NOTE(review): the condition line guarding ExtCost++ (source line 1125) is
// an elided link in this dump; per the comment it checks for an i8/i16 load
// operand — confirm against the original source.
1121static unsigned getOperandsExtensionCost(const Instruction *I) {
1122 unsigned ExtCost = 0;
1123 for (Value *Op : I->operands())
1124 // A load of i8 or i16 sign/zero extends to i32.
1126 ExtCost++;
1127
1128 return ExtCost;
1129}
1130
// NOTE(review): doxygen-rendered source — the function name line of
// SystemZTTIImpl::getCmpSelInstrCost, the early cost-kind guard, and the
// predicate case labels (source lines 1131, 1133, 1135, 1181-1185,
// 1188-1191) are elided links in this dump; visible code left byte-identical.
1132 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1134 TTI::OperandValueInfo Op2Info, const Instruction *I) const {
1136 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1137 Op1Info, Op2Info);
1138
 // Scalar compare / select.
1139 if (!ValTy->isVectorTy()) {
1140 switch (Opcode) {
1141 case Instruction::ICmp: {
1142 // A loaded value compared with 0 with multiple users becomes Load and
1143 // Test. The load is then not foldable, so return 0 cost for the ICmp.
1144 unsigned ScalarBits = ValTy->getScalarSizeInBits();
1145 if (I != nullptr && (ScalarBits == 32 || ScalarBits == 64))
1146 if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
1147 if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
1148 if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
1149 C->isZero())
1150 return 0;
1151
1152 unsigned Cost = 1;
1153 if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
1154 Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
1155 return Cost;
1156 }
1157 case Instruction::Select:
1158 if (ValTy->isFloatingPointTy())
1159 return 4; // No LOC for FP - costs a conditional jump.
1160
1161 // When selecting based on an i128 comparison, LOC / VSEL is possible
1162 // if i128 comparisons are directly supported.
1163 if (I != nullptr)
1164 if (ICmpInst *CI = dyn_cast<ICmpInst>(I->getOperand(0)))
1165 if (CI->getOperand(0)->getType()->isIntegerTy(128))
1166 return ST->hasVectorEnhancements3() ? 1 : 4;
1167
1168 // Load On Condition / Select Register available, except for i128.
1169 return !isInt128InVR(ValTy) ? 1 : 4;
1170 }
1171 }
 // Vector compare / select on subtargets with vector instructions.
1172 else if (ST->hasVector()) {
1173 unsigned VF = cast<FixedVectorType>(ValTy)->getNumElements();
1174
1175 // Called with a compare instruction.
1176 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
1177 unsigned PredicateExtraCost = 0;
1178 if (I != nullptr) {
1179 // Some predicates cost one or two extra instructions.
1180 switch (cast<CmpInst>(I)->getPredicate()) {
1186 PredicateExtraCost = 1;
1187 break;
1192 PredicateExtraCost = 2;
1193 break;
1194 default:
1195 break;
1196 }
1197 }
1198
1199 // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
1200 // floats. FIXME: <2 x float> generates same code as <4 x float>.
1201 unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
1202 unsigned NumVecs_cmp = getNumVectorRegs(ValTy);
1203
1204 unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
1205 return Cost;
1206 }
1207 else { // Called with a select instruction.
1208 assert (Opcode == Instruction::Select);
1209
1210 // We can figure out the extra cost of packing / unpacking if the
1211 // instruction was passed and the compare instruction is found.
1212 unsigned PackCost = 0;
1213 Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
1214 if (CmpOpTy != nullptr)
1215 PackCost =
1216 getVectorBitmaskConversionCost(CmpOpTy, ValTy);
1217
1218 return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
1219 }
1220 }
1221
1222 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1223 Op1Info, Op2Info);
1224}
1225
// NOTE(review): the opening signature line of
// SystemZTTIImpl::getVectorInstrCost (source line 1226) is an elided link
// in this dump; visible code left byte-identical.
1227 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1228 const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC) const {
1229 if (Opcode == Instruction::InsertElement) {
1230 // Vector Element Load.
1231 if (Op1 != nullptr && isFreeEltLoad(Op1))
1232 return 0;
1233
1234 // vlvgp will insert two grs into a vector register, so count half the
1235 // number of instructions as an estimate when we don't have the full
1236 // picture (as in getScalarizationOverhead()).
1237 if (Val->isIntOrIntVectorTy(64))
1238 return ((Index % 2 == 0) ? 1 : 0);
1239 }
1240
1241 if (Opcode == Instruction::ExtractElement) {
 // i1 elements also need a test-under-mask after the extract.
1242 int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);
1243
1244 // Give a slight penalty for moving out of vector pipeline to FXU unit.
1245 if (Index == 0 && Val->isIntOrIntVectorTy())
1246 Cost += 1;
1247
1248 return Cost;
1249 }
1250
 // Other vector-instruction opcodes use the generic cost.
1251 return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1, VIC);
1252}
1253
1254// Check if a load may be folded as a memory operand in its user.
// On success, FoldedValue is set to the load itself, or to the single-use
// trunc/sext/zext of the load that the user actually consumes.
// NOTE(review): the signature line of SystemZTTIImpl::isFoldableLoad
// (source line 1255) is an elided link in this dump.
1256 const Instruction *&FoldedValue) const {
 // Folding requires the load to have exactly one user.
1257 if (!Ld->hasOneUse())
1258 return false;
1259 FoldedValue = Ld;
1260 const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
1261 unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
1262 unsigned TruncBits = 0;
1263 unsigned SExtBits = 0;
1264 unsigned ZExtBits = 0;
 // Look through a single-use trunc/sext/zext between the load and its
 // real user, remembering which conversion was applied.
1265 if (UserI->hasOneUse()) {
1266 unsigned UserBits = UserI->getType()->getScalarSizeInBits();
1267 if (isa<TruncInst>(UserI))
1268 TruncBits = UserBits;
1269 else if (isa<SExtInst>(UserI))
1270 SExtBits = UserBits;
1271 else if (isa<ZExtInst>(UserI))
1272 ZExtBits = UserBits;
1273 }
1274 if (TruncBits || SExtBits || ZExtBits) {
1275 FoldedValue = UserI;
1276 UserI = cast<Instruction>(*UserI->user_begin());
1277 // Load (single use) -> trunc/extend (single use) -> UserI
1278 }
1279 if ((UserI->getOpcode() == Instruction::Sub ||
1280 UserI->getOpcode() == Instruction::SDiv ||
1281 UserI->getOpcode() == Instruction::UDiv) &&
1282 UserI->getOperand(1) != FoldedValue)
1283 return false; // Not commutative, only RHS foldable.
1284 // LoadOrTruncBits holds the number of effectively loaded bits, but 0 if an
1285 // extension was made of the load.
1286 unsigned LoadOrTruncBits =
1287 ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
 // Fallthroughs are deliberate: each case adds the width combinations it
 // supports and then defers to the more general cases below.
1288 switch (UserI->getOpcode()) {
1289 case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
1290 case Instruction::Sub:
1291 case Instruction::ICmp:
1292 if (LoadedBits == 32 && ZExtBits == 64)
1293 return true;
1294 [[fallthrough]];
1295 case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
1296 if (UserI->getOpcode() != Instruction::ICmp) {
1297 if (LoadedBits == 16 &&
1298 (SExtBits == 32 ||
1299 (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
1300 return true;
1301 if (LoadOrTruncBits == 16)
1302 return true;
1303 }
1304 [[fallthrough]];
1305 case Instruction::SDiv:// SE: 32->64
1306 if (LoadedBits == 32 && SExtBits == 64)
1307 return true;
1308 [[fallthrough]];
1309 case Instruction::UDiv:
1310 case Instruction::And:
1311 case Instruction::Or:
1312 case Instruction::Xor:
1313 // This also makes sense for float operations, but disabled for now due
1314 // to regressions.
1315 // case Instruction::FCmp:
1316 // case Instruction::FAdd:
1317 // case Instruction::FSub:
1318 // case Instruction::FMul:
1319 // case Instruction::FDiv:
1320
1321 // All possible extensions of memory checked above.
1322
1323 // Comparison between memory and immediate.
1324 if (UserI->getOpcode() == Instruction::ICmp)
1325 if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
1326 if (CI->getValue().isIntN(16))
1327 return true;
1328 return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
1329 break;
1330 }
1331 return false;
1332}
1333
1334static bool isBswapIntrinsicCall(const Value *V) {
1335 if (const Instruction *I = dyn_cast<Instruction>(V))
1336 if (auto *CI = dyn_cast<CallInst>(I))
1337 if (auto *F = CI->getCalledFunction())
1338 if (F->getIntrinsicID() == Intrinsic::bswap)
1339 return true;
1340 return false;
1341}
1342
// NOTE(review): doxygen-rendered source — the opening signature line of
// SystemZTTIImpl::getMemoryOpCost, the CostKind parameter line, and the
// cost-kind guard (source lines 1343, 1346, 1352) are elided links in this
// dump; visible code left byte-identical.
1344 Align Alignment,
1345 unsigned AddressSpace,
1347 TTI::OperandValueInfo OpInfo,
1348 const Instruction *I) const {
1349 assert(!Src->isVoidTy() && "Invalid type");
1350
1351 // TODO: Handle other cost kinds.
1353 return 1;
1354
 // A scalar store whose value operand is a foldable read-modify-write is
 // free: the memory operand folds into the arithmetic instruction.
1355 if (I && Opcode == Instruction::Store && !Src->isVectorTy()) {
1356 if (isFoldableRMW(dyn_cast<Instruction>(I->getOperand(0)), Src))
1357 return TTI::TCC_Free;
1358 }
1359
1360 if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
1361 // Store the load or its truncated or extended value in FoldedValue.
1362 const Instruction *FoldedValue = nullptr;
1363 if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
1364 const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
1365 assert (UserI->getNumOperands() == 2 && "Expected a binop.");
1366
1367 // UserI can't fold two loads, so in that case return 0 cost only
1368 // half of the time.
1369 for (unsigned i = 0; i < 2; ++i) {
1370 if (UserI->getOperand(i) == FoldedValue)
1371 continue;
1372
1373 if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))){
1374 LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
1375 if (!OtherLoad &&
1376 (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
1377 isa<ZExtInst>(OtherOp)))
1378 OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
1379 if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
1380 return i == 0; // Both operands foldable.
1381 }
1382 }
1383
1384 return 0; // Only I is foldable in user.
1385 }
1386 }
1387
1388 // Type legalization (via getNumberOfParts) can't handle structs
1389 if (TLI->getValueType(DL, Src, true) == MVT::Other)
1390 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1391 CostKind);
1392
1393 // FP128 is a legal type but kept in a register pair on older CPUs.
1394 if (Src->isFP128Ty() && !ST->hasVectorEnhancements1())
1395 return 2;
1396
1397 unsigned NumOps =
1398 (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));
1399
1400 // Store/Load reversed saves one instruction.
1401 if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
1402 I != nullptr) {
1403 if (Opcode == Instruction::Load && I->hasOneUse()) {
1404 const Instruction *LdUser = cast<Instruction>(*I->user_begin());
1405 // In case of load -> bswap -> store, return normal cost for the load.
1406 if (isBswapIntrinsicCall(LdUser) &&
1407 (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
1408 return 0;
1409 }
1410 else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
1411 const Value *StoredVal = SI->getValueOperand();
1412 if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
1413 return 0;
1414 }
1415 }
1416
1417 return NumOps;
1418}
1419
1420// The generic implementation of getInterleavedMemoryOpCost() is based on
1421// adding costs of the memory operations plus all the extracts and inserts
1422// needed for using / defining the vector operands. The SystemZ version does
1423// roughly the same but bases the computations on vector permutations
1424// instead.
// NOTE(review): the signature line of
// SystemZTTIImpl::getInterleavedMemoryOpCost (source line 1425) is an
// elided link in this dump; visible code left byte-identical.
1426 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1427 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1428 bool UseMaskForCond, bool UseMaskForGaps) const {
 // Masked variants are not modeled here; use the generic cost.
1429 if (UseMaskForCond || UseMaskForGaps)
1430 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1431 Alignment, AddressSpace, CostKind,
1432 UseMaskForCond, UseMaskForGaps);
1433 assert(isa<VectorType>(VecTy) &&
1434 "Expect a vector type for interleaved memory op");
1435
1436 unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1437 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1438 unsigned VF = NumElts / Factor;
1439 unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
1440 unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
1441 unsigned NumPermutes = 0;
1442
1443 if (Opcode == Instruction::Load) {
1444 // Loading interleave groups may have gaps, which may mean fewer
1445 // loads. Find out how many vectors will be loaded in total, and in how
1446 // many of them each value will be in.
1447 BitVector UsedInsts(NumVectorMemOps, false);
1448 std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
1449 for (unsigned Index : Indices)
1450 for (unsigned Elt = 0; Elt < VF; ++Elt) {
1451 unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
1452 UsedInsts.set(Vec);
1453 ValueVecs[Index].set(Vec);
1454 }
1455 NumVectorMemOps = UsedInsts.count();
1456
1457 for (unsigned Index : Indices) {
1458 // Estimate that each loaded source vector containing this Index
1459 // requires one operation, except that vperm can handle two input
1460 // registers first time for each dst vector.
1461 unsigned NumSrcVecs = ValueVecs[Index].count();
1462 unsigned NumDstVecs = divideCeil(VF * getScalarSizeInBits(VecTy), 128U);
1463 assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
1464 NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
1465 }
1466 } else {
1467 // Estimate the permutes for each stored vector as the smaller of the
1468 // number of elements and the number of source vectors. Subtract one per
1469 // dst vector for vperm (S.A.).
1470 unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
1471 unsigned NumDstVecs = NumVectorMemOps;
1472 NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
1473 }
1474
1475 // Cost of load/store operations and the permutations needed.
1476 return NumVectorMemOps + NumPermutes;
1477}
1478
1479InstructionCost getIntAddReductionCost(unsigned NumVec, unsigned ScalarBits) {
1480 InstructionCost Cost = 0;
1481 // Binary Tree of N/2 + N/4 + ... operations yields N - 1 operations total.
1482 Cost += NumVec - 1;
1483 // For integer adds, VSUM creates shorter reductions on the final vector.
1484 Cost += (ScalarBits < 32) ? 3 : 2;
1485 return Cost;
1486}
1487
1488InstructionCost getFastReductionCost(unsigned NumVec, unsigned NumElems,
1489 unsigned ScalarBits) {
1490 unsigned NumEltsPerVecReg = (SystemZ::VectorBits / ScalarBits);
1491 InstructionCost Cost = 0;
1492 // Binary Tree of N/2 + N/4 + ... operations yields N - 1 operations total.
1493 Cost += NumVec - 1;
1494 // For each shuffle / arithmetic layer, we need 2 instructions, and we need
1495 // log2(Elements in Last Vector) layers.
1496 Cost += 2 * Log2_32_Ceil(std::min(NumElems, NumEltsPerVecReg));
1497 return Cost;
1498}
1499
1500inline bool customCostReductions(unsigned Opcode) {
1501 return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
1502 Opcode == Instruction::Add || Opcode == Instruction::Mul;
1503}
1504
// NOTE(review): doxygen-rendered source — the signature lines of
// SystemZTTIImpl::getArithmeticReductionCost, the CostKind parameter, the
// FMF/ordering condition, and the Cost declaration (source lines 1505-1506,
// 1508, 1513, 1521) are elided links in this dump; visible code left
// byte-identical.
1507 std::optional<FastMathFlags> FMF,
1509 unsigned ScalarBits = Ty->getScalarSizeInBits();
1510 // The following is only for subtargets with vector math, non-ordered
1511 // reductions, and reasonable scalar sizes for int and fp add/mul.
1512 if (customCostReductions(Opcode) && ST->hasVector() &&
1514 ScalarBits <= SystemZ::VectorBits) {
1515 unsigned NumVectors = getNumVectorRegs(Ty);
1516 unsigned NumElems = ((FixedVectorType *)Ty)->getNumElements();
1517 // Integer Add is using custom code gen, that needs to be accounted for.
1518 if (Opcode == Instruction::Add)
1519 return getIntAddReductionCost(NumVectors, ScalarBits);
1520 // The base cost is the same across all other arithmetic instructions
1522 getFastReductionCost(NumVectors, NumElems, ScalarBits);
1523 // But we need to account for the final op involving the scalar operand.
1524 if ((Opcode == Instruction::FAdd) || (Opcode == Instruction::FMul))
1525 Cost += 1;
1526 return Cost;
1527 }
1528 // otherwise, fall back to the standard implementation
1529 return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
1530}
1531
// NOTE(review): doxygen-rendered source — the signature lines of
// SystemZTTIImpl::getMinMaxReductionCost and the Cost declaration (source
// lines 1532-1533, 1535, 1541) are elided links in this dump; visible code
// left byte-identical.
1534 FastMathFlags FMF,
1536 // Return custom costs only on subtargets with vector enhancements.
1537 if (ST->hasVectorEnhancements1()) {
1538 unsigned NumVectors = getNumVectorRegs(Ty);
1539 unsigned NumElems = ((FixedVectorType *)Ty)->getNumElements();
1540 unsigned ScalarBits = Ty->getScalarSizeInBits();
1542 // Binary Tree of N/2 + N/4 + ... operations yields N - 1 operations total.
1543 Cost += NumVectors - 1;
1544 // For the final vector, we need shuffle + min/max operations, and
1545 // we need #Elements - 1 of them.
1546 Cost += 2 * (std::min(NumElems, SystemZ::VectorBits / ScalarBits) - 1);
1547 return Cost;
1548 }
1549 // For other targets, fall back to the standard implementation
1550 return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1551}
1552
// Returns the custom SystemZ cost for a vector intrinsic, or -1 when the
// caller should fall back to the generic intrinsic cost.
// NOTE(review): the function-name line (source line 1554,
// getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,) is an elided
// link in this dump.
1553 static int
1555 const SmallVectorImpl<Type *> &ParamTys) {
 // A vector bswap lowers to one VPERM per vector register.
1556 if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
1557 return getNumVectorRegs(RetTy); // VPERM
1558
1559 return -1;
1560}
1561
1571
// NOTE(review): the signature line of
// SystemZTTIImpl::shouldExpandReduction(const IntrinsicInst *II)
// (source line 1572) is an elided link in this dump; visible code left
// byte-identical.
1573 // Always expand on Subtargets without vector instructions.
1574 if (!ST->hasVector())
1575 return true;
1576
1577 // Whether or not to expand is a per-intrinsic decision.
1578 switch (II->getIntrinsicID()) {
1579 default:
1580 return true;
1581 // Do not expand vector.reduce.add...
1582 case Intrinsic::vector_reduce_add:
1583 auto *VType = cast<FixedVectorType>(II->getOperand(0)->getType());
1584 // ...unless the scalar size is i64 or larger,
1585 // or the operand vector is not full, since the
1586 // performance benefit is dubious in those cases.
1587 return VType->getScalarSizeInBits() >= 64 ||
1588 VType->getPrimitiveSizeInBits() < SystemZ::VectorBits;
1589 }
1590}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Expand Atomic instructions
This file provides a helper that implements much of the TTI interface in terms of the target-independ...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static unsigned InstrCount
Hexagon Common GEP
const HexagonInstrInfo * TII
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static const Function * getCalledFunction(const Value *V)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
static unsigned getNumElements(Type *Ty)
#define LLVM_DEBUG(...)
Definition Debug.h:114
bool customCostReductions(unsigned Opcode)
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1)
static bool isBswapIntrinsicCall(const Value *V)
InstructionCost getIntAddReductionCost(unsigned NumVec, unsigned ScalarBits)
static void countNumMemAccesses(const Value *Ptr, unsigned &NumStores, unsigned &NumLoads, const Function *F)
static unsigned getOperandsExtensionCost(const Instruction *I)
static Type * getCmpOpsType(const Instruction *I, unsigned VF=1)
static unsigned getScalarSizeInBits(Type *Ty)
static bool isFoldableRMW(const Instruction *I, Type *Ty)
static bool isFreeEltLoad(const Value *Op)
InstructionCost getFastReductionCost(unsigned NumVec, unsigned NumElems, unsigned ScalarBits)
static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, const SmallVectorImpl< Type * > &ParamTys)
static bool isUsedAsMemCpySource(const Value *V, bool &OtherUse)
static unsigned getNumVectorRegs(Type *Ty)
This file describes how to lower LLVM code to machine code.
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition APInt.h:78
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition APInt.h:450
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
unsigned getNumberOfParts(Type *Tp) const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
size_type count() const
count - Returns the number of bits which are set.
Definition BitVector.h:181
BitVector & set()
Definition BitVector.h:370
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
This is an important base class in LLVM.
Definition Constant.h:43
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent fixed width SIMD vectors.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:873
This instruction compares its operands according to the predicate given to the constructor.
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
const SmallVectorImpl< Type * > & getArgTypes() const
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
This class wraps the llvm.memcpy intrinsic.
The optimization diagnostic interface.
The main scalar evolution driver.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
Estimate the overhead of scalarizing an instruction.
bool isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) const
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
unsigned getNumberOfRegisters(unsigned ClassID) const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const override
unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) const
unsigned getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst, const Instruction *I) const
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const override
bool shouldExpandReduction(const IntrinsicInst *II) const override
TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
bool hasDivRemOp(Type *DataType, bool IsSigned) const override
unsigned getVectorTruncCost(Type *SrcTy, Type *DstTy) const
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
unsigned adjustInliningThreshold(const CallBase *CB) const override
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
virtual bool isLoweredToCall(const Function *F) const
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
PopcntSupportKind
Flags indicating the kind of support for population count.
@ TCC_Free
Expected to fold away in lowering.
@ TCC_Basic
The cost of a typical 'add' instruction.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
CastContextHint
Represents a hint about the context in which a cast is used.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
Definition TypeSize.h:346
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
user_iterator user_begin()
Definition Value.h:402
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
Base class of all SIMD vector types.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
const unsigned VectorBits
Definition SystemZ.h:155
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
InstructionCost Cost
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:165
unsigned Insns
TODO: Some of these could be merged.
Parameters that control the generic loop unrolling transformation.
bool Force
Apply loop unroll on any kind of loop (mainly to loops that fail runtime unrolling).
unsigned DefaultUnrollRuntimeCount
Default unroll count for loops with run-time trip count.
unsigned FullUnrollMaxCount
Set the maximum unrolling factor for full unrolling.
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
bool AllowExpensiveTripCount
Allow emitting expensive instructions (such as divisions) when computing the trip count of a loop for...