Bug Summary

File: lib/Target/AArch64/AArch64TargetTransformInfo.cpp
Warning: line 698, column 21
The result of the left shift is undefined due to shifting by '4294967295', which is greater than or equal to the width of type 'int'
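
For context: llvm::Log2_32 is defined as 31 - countLeadingZeros(Value), and countLeadingZeros(0) returns the full bit width (32), so Log2_32(0) wraps to 4294967295 in unsigned arithmetic. On the path flagged below, the quotient MaxStridedLoads / StridedLoads is 0 whenever StridedLoads exceeds MaxStridedLoads (7), producing exactly the shift amount in the warning. The standalone sketch below reproduces that arithmetic and shows one possible guard; it re-implements the two helpers rather than including llvm/Support/MathExtras.h, and the clamped computation at the end is illustrative only, not necessarily the fix adopted upstream.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mimics llvm::countLeadingZeros<uint32_t> with ZB_Width semantics: an input
// of 0 yields the full bit width (32). __builtin_clz is a GCC/Clang builtin
// and is itself undefined for 0, hence the explicit check.
static unsigned countLeadingZeros32(uint32_t V) {
  return V == 0 ? 32u : static_cast<unsigned>(__builtin_clz(V));
}

// Mimics llvm::Log2_32: 31 - countLeadingZeros(V). For V == 0 the unsigned
// subtraction 31 - 32 wraps to 4294967295, the reported shift amount.
static unsigned log2_32(uint32_t V) { return 31u - countLeadingZeros32(V); }

int main() {
  const unsigned MaxStridedLoads = 7;

  // On the analyzed path, StridedLoads is only known to be nonzero, so the
  // integer quotient below can be 0.
  unsigned StridedLoads = 8;
  std::printf("shift amount = %u\n", log2_32(MaxStridedLoads / StridedLoads));
  // Evaluating `1 << log2_32(0)` here would be the flagged undefined shift.

  // One possible guard (illustrative, not necessarily the upstream fix):
  // clamp the quotient to at least 1 so the shift amount stays in [0, 2].
  unsigned MaxCount = 1u << log2_32(std::max(MaxStridedLoads / StridedLoads, 1u));
  std::printf("guarded MaxCount = %u\n", MaxCount); // prints 1
  return 0;
}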

Annotated Source Code

1//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "AArch64TargetTransformInfo.h"
11#include "MCTargetDesc/AArch64AddressingModes.h"
12#include "llvm/Analysis/LoopInfo.h"
13#include "llvm/Analysis/TargetTransformInfo.h"
14#include "llvm/CodeGen/BasicTTIImpl.h"
15#include "llvm/IR/IntrinsicInst.h"
16#include "llvm/Support/Debug.h"
17#include "llvm/Target/CostTable.h"
18#include "llvm/Target/TargetLowering.h"
19#include <algorithm>
20using namespace llvm;
21
22#define DEBUG_TYPE "aarch64tti"
23
24static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
25 cl::init(true), cl::Hidden);
26
27bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
28 const Function *Callee) const {
29 const TargetMachine &TM = getTLI()->getTargetMachine();
30
31 const FeatureBitset &CallerBits =
32 TM.getSubtargetImpl(*Caller)->getFeatureBits();
33 const FeatureBitset &CalleeBits =
34 TM.getSubtargetImpl(*Callee)->getFeatureBits();
35
36 // Inline a callee if its target-features are a subset of the caller's
37 // target-features.
38 return (CallerBits & CalleeBits) == CalleeBits;
39}
40
41/// \brief Calculate the cost of materializing a 64-bit value. This helper
42/// method might only calculate a fraction of a larger immediate. Therefore it
43/// is valid to return a cost of ZERO.
44int AArch64TTIImpl::getIntImmCost(int64_t Val) {
45 // Check if the immediate can be encoded within an instruction.
46 if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
47 return 0;
48
49 if (Val < 0)
50 Val = ~Val;
51
52 // Calculate how many moves we will need to materialize this constant.
53 unsigned LZ = countLeadingZeros((uint64_t)Val);
54 return (64 - LZ + 15) / 16;
55}
56
57/// \brief Calculate the cost of materializing the given constant.
58int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
59 assert(Ty->isIntegerTy());
60
61 unsigned BitSize = Ty->getPrimitiveSizeInBits();
62 if (BitSize == 0)
63 return ~0U;
64
65 // Sign-extend all constants to a multiple of 64-bit.
66 APInt ImmVal = Imm;
67 if (BitSize & 0x3f)
68 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
69
70 // Split the constant into 64-bit chunks and calculate the cost for each
71 // chunk.
72 int Cost = 0;
73 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
74 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
75 int64_t Val = Tmp.getSExtValue();
76 Cost += getIntImmCost(Val);
77 }
78 // We need at least one instruction to materialize the constant.
79 return std::max(1, Cost);
80}
81
82int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
83 const APInt &Imm, Type *Ty) {
84 assert(Ty->isIntegerTy());
85
86 unsigned BitSize = Ty->getPrimitiveSizeInBits();
87 // There is no cost model for constants with a bit size of 0. Return TCC_Free
88 // here, so that constant hoisting will ignore this constant.
89 if (BitSize == 0)
90 return TTI::TCC_Free;
91
92 unsigned ImmIdx = ~0U;
93 switch (Opcode) {
94 default:
95 return TTI::TCC_Free;
96 case Instruction::GetElementPtr:
97 // Always hoist the base address of a GetElementPtr.
98 if (Idx == 0)
99 return 2 * TTI::TCC_Basic;
100 return TTI::TCC_Free;
101 case Instruction::Store:
102 ImmIdx = 0;
103 break;
104 case Instruction::Add:
105 case Instruction::Sub:
106 case Instruction::Mul:
107 case Instruction::UDiv:
108 case Instruction::SDiv:
109 case Instruction::URem:
110 case Instruction::SRem:
111 case Instruction::And:
112 case Instruction::Or:
113 case Instruction::Xor:
114 case Instruction::ICmp:
115 ImmIdx = 1;
116 break;
117 // Always return TCC_Free for the shift value of a shift instruction.
118 case Instruction::Shl:
119 case Instruction::LShr:
120 case Instruction::AShr:
121 if (Idx == 1)
122 return TTI::TCC_Free;
123 break;
124 case Instruction::Trunc:
125 case Instruction::ZExt:
126 case Instruction::SExt:
127 case Instruction::IntToPtr:
128 case Instruction::PtrToInt:
129 case Instruction::BitCast:
130 case Instruction::PHI:
131 case Instruction::Call:
132 case Instruction::Select:
133 case Instruction::Ret:
134 case Instruction::Load:
135 break;
136 }
137
138 if (Idx == ImmIdx) {
139 int NumConstants = (BitSize + 63) / 64;
140 int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
141 return (Cost <= NumConstants * TTI::TCC_Basic)
142 ? static_cast<int>(TTI::TCC_Free)
143 : Cost;
144 }
145 return AArch64TTIImpl::getIntImmCost(Imm, Ty);
146}
147
148int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
149 const APInt &Imm, Type *Ty) {
150 assert(Ty->isIntegerTy());
151
152 unsigned BitSize = Ty->getPrimitiveSizeInBits();
153 // There is no cost model for constants with a bit size of 0. Return TCC_Free
154 // here, so that constant hoisting will ignore this constant.
155 if (BitSize == 0)
156 return TTI::TCC_Free;
157
158 switch (IID) {
159 default:
160 return TTI::TCC_Free;
161 case Intrinsic::sadd_with_overflow:
162 case Intrinsic::uadd_with_overflow:
163 case Intrinsic::ssub_with_overflow:
164 case Intrinsic::usub_with_overflow:
165 case Intrinsic::smul_with_overflow:
166 case Intrinsic::umul_with_overflow:
167 if (Idx == 1) {
168 int NumConstants = (BitSize + 63) / 64;
169 int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
170 return (Cost <= NumConstants * TTI::TCC_Basic)
171 ? static_cast<int>(TTI::TCC_Free)
172 : Cost;
173 }
174 break;
175 case Intrinsic::experimental_stackmap:
176 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
177 return TTI::TCC_Free;
178 break;
179 case Intrinsic::experimental_patchpoint_void:
180 case Intrinsic::experimental_patchpoint_i64:
181 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
182 return TTI::TCC_Free;
183 break;
184 }
185 return AArch64TTIImpl::getIntImmCost(Imm, Ty);
186}
187
188TargetTransformInfo::PopcntSupportKind
189AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
190 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
191 if (TyWidth == 32 || TyWidth == 64)
192 return TTI::PSK_FastHardware;
193 // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
194 return TTI::PSK_Software;
195}
196
197bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
198 ArrayRef<const Value *> Args) {
199
200 // A helper that returns a vector type from the given type. The number of
201 // elements in type Ty determines the vector width.
202 auto toVectorTy = [&](Type *ArgTy) {
203 return VectorType::get(ArgTy->getScalarType(),
204 DstTy->getVectorNumElements());
205 };
206
207 // Exit early if DstTy is not a vector type whose elements are at least
208 // 16-bits wide.
209 if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
210 return false;
211
212 // Determine if the operation has a widening variant. We consider both the
213 // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
214 // instructions.
215 //
216 // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
217 // verify that their extending operands are eliminated during code
218 // generation.
219 switch (Opcode) {
220 case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
221 case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
222 break;
223 default:
224 return false;
225 }
226
227 // To be a widening instruction (either the "wide" or "long" versions), the
228 // second operand must be a sign- or zero-extend having a single user. We
229 // only consider extends having a single user because they may otherwise not
230 // be eliminated.
231 if (Args.size() != 2 ||
232 (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
233 !Args[1]->hasOneUse())
234 return false;
235 auto *Extend = cast<CastInst>(Args[1]);
236
237 // Legalize the destination type and ensure it can be used in a widening
238 // operation.
239 auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
240 unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
241 if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
242 return false;
243
244 // Legalize the source type and ensure it can be used in a widening
245 // operation.
246 Type *SrcTy = toVectorTy(Extend->getSrcTy());
247 auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
248 unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
249 if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
250 return false;
251
252 // Get the total number of vector elements in the legalized types.
253 unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorNumElements();
254 unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorNumElements();
255
256 // Return true if the legalized types have the same number of vector elements
257 // and the destination element type size is twice that of the source type.
258 return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
259}
260
261int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
262 const Instruction *I) {
263 int ISD = TLI->InstructionOpcodeToISD(Opcode);
264 assert(ISD && "Invalid opcode");
265
266 // If the cast is observable, and it is used by a widening instruction (e.g.,
267 // uaddl, saddw, etc.), it may be free.
268 if (I && I->hasOneUse()) {
269 auto *SingleUser = cast<Instruction>(*I->user_begin());
270 SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
271 if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
272 // If the cast is the second operand, it is free. We will generate either
273 // a "wide" or "long" version of the widening instruction.
274 if (I == SingleUser->getOperand(1))
275 return 0;
276 // If the cast is not the second operand, it will be free if it looks the
277 // same as the second operand. In this case, we will generate a "long"
278 // version of the widening instruction.
279 if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
280 if (I->getOpcode() == Cast->getOpcode() &&
281 cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
282 return 0;
283 }
284 }
285
286 EVT SrcTy = TLI->getValueType(DL, Src);
287 EVT DstTy = TLI->getValueType(DL, Dst);
288
289 if (!SrcTy.isSimple() || !DstTy.isSimple())
290 return BaseT::getCastInstrCost(Opcode, Dst, Src);
291
292 static const TypeConversionCostTblEntry
293 ConversionTbl[] = {
294 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
295 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
296 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
297 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
298
299 // The number of shll instructions for the extension.
300 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
301 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
302 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
303 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
304 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
305 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
306 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
307 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
308 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
309 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
310 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
311 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
312 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
313 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
314 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
315 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
316
317 // LowerVectorINT_TO_FP:
318 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
319 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
320 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
321 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
322 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
323 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
324
325 // Complex: to v2f32
326 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
327 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
328 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
329 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
330 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
331 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
332
333 // Complex: to v4f32
334 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 },
335 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
336 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
337 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
338
339 // Complex: to v8f32
340 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 },
341 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
342 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 },
343 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
344
345 // Complex: to v16f32
346 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
347 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
348
349 // Complex: to v2f64
350 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
351 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
352 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
353 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
354 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
355 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
356
357
358 // LowerVectorFP_TO_INT
359 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
360 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
361 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
362 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
363 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
364 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
365
366 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
367 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
368 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
369 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 },
370 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
371 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
372 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 },
373
374 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
375 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
376 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 },
377 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
378 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 },
379
380 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
381 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
382 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
383 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 },
384 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
385 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
386 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 },
387 };
388
389 if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
390 DstTy.getSimpleVT(),
391 SrcTy.getSimpleVT()))
392 return Entry->Cost;
393
394 return BaseT::getCastInstrCost(Opcode, Dst, Src);
395}
396
397int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
398 VectorType *VecTy,
399 unsigned Index) {
400
401 // Make sure we were given a valid extend opcode.
402 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
403 "Invalid opcode");
404
405 // We are extending an element we extract from a vector, so the source type
406 // of the extend is the element type of the vector.
407 auto *Src = VecTy->getElementType();
408
409 // Sign- and zero-extends are for integer types only.
410 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
411
412 // Get the cost for the extract. We compute the cost (if any) for the extend
413 // below.
414 auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
415
416 // Legalize the types.
417 auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
418 auto DstVT = TLI->getValueType(DL, Dst);
419 auto SrcVT = TLI->getValueType(DL, Src);
420
421 // If the resulting type is still a vector and the destination type is legal,
422 // we may get the extension for free. If not, get the default cost for the
423 // extend.
424 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
425 return Cost + getCastInstrCost(Opcode, Dst, Src);
426
427 // The destination type should be larger than the element type. If not, get
428 // the default cost for the extend.
429 if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
430 return Cost + getCastInstrCost(Opcode, Dst, Src);
431
432 switch (Opcode) {
433 default:
434 llvm_unreachable("Opcode should be either SExt or ZExt");
435
436 // For sign-extends, we only need a smov, which performs the extension
437 // automatically.
438 case Instruction::SExt:
439 return Cost;
440
441 // For zero-extends, the extend is performed automatically by a umov unless
442 // the destination type is i64 and the element type is i8 or i16.
443 case Instruction::ZExt:
444 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
445 return Cost;
446 }
447
448 // If we are unable to perform the extend for free, get the default cost.
449 return Cost + getCastInstrCost(Opcode, Dst, Src);
450}
451
452int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
453 unsigned Index) {
454 assert(Val->isVectorTy() && "This must be a vector type");
455
456 if (Index != -1U) {
457 // Legalize the type.
458 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
459
460 // This type is legalized to a scalar type.
461 if (!LT.second.isVector())
462 return 0;
463
464 // The type may be split. Normalize the index to the new type.
465 unsigned Width = LT.second.getVectorNumElements();
466 Index = Index % Width;
467
468 // The element at index zero is already inside the vector.
469 if (Index == 0)
470 return 0;
471 }
472
473 // All other insert/extracts cost this much.
474 return ST->getVectorInsertExtractBaseCost();
475}
476
477int AArch64TTIImpl::getArithmeticInstrCost(
478 unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
479 TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
480 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
481 // Legalize the type.
482 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
483
484 // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
485 // add in the widening overhead specified by the sub-target. Since the
486 // extends feeding widening instructions are performed automatically, they
487 // aren't present in the generated code and have a zero cost. By adding a
488 // widening overhead here, we attach the total cost of the combined operation
489 // to the widening instruction.
490 int Cost = 0;
491 if (isWideningInstruction(Ty, Opcode, Args))
492 Cost += ST->getWideningBaseCost();
493
494 int ISD = TLI->InstructionOpcodeToISD(Opcode);
495
496 if (ISD == ISD::SDIV &&
497 Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
498 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
499 // On AArch64, scalar signed division by a power-of-two constant is
500 // normally expanded to the sequence ADD + CMP + SELECT + SRA.
501 // The OperandValue properties may not be the same as those of the
502 // previous operation; conservatively assume OP_None.
503 Cost += getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
504 TargetTransformInfo::OP_None,
505 TargetTransformInfo::OP_None);
506 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
507 TargetTransformInfo::OP_None,
508 TargetTransformInfo::OP_None);
509 Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
510 TargetTransformInfo::OP_None,
511 TargetTransformInfo::OP_None);
512 Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
513 TargetTransformInfo::OP_None,
514 TargetTransformInfo::OP_None);
515 return Cost;
516 }
517
518 switch (ISD) {
519 default:
520 return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
521 Opd1PropInfo, Opd2PropInfo);
522 case ISD::ADD:
523 case ISD::MUL:
524 case ISD::XOR:
525 case ISD::OR:
526 case ISD::AND:
527 // These nodes are marked as 'custom' for combining purposes only.
528 // We know that they are legal. See LowerAdd in ISelLowering.
529 return (Cost + 1) * LT.first;
530 }
531}
532
533int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
534 const SCEV *Ptr) {
535 // Address computations in vectorized code with non-consecutive addresses will
536 // likely result in more instructions compared to scalar code where the
537 // computation can more often be merged into the index mode. The resulting
538 // extra micro-ops can significantly decrease throughput.
539 unsigned NumVectorInstToHideOverhead = 10;
540 int MaxMergeDistance = 64;
541
542 if (Ty->isVectorTy() && SE &&
543 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
544 return NumVectorInstToHideOverhead;
545
546 // In many cases the address computation is not merged into the instruction
547 // addressing mode.
548 return 1;
549}
550
551int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
552 Type *CondTy, const Instruction *I) {
553
554 int ISD = TLI->InstructionOpcodeToISD(Opcode);
555 // We don't lower some vector selects well when they are wider than the
556 // register width.
557 if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
558 // We would need this many instructions to hide the scalarization happening.
559 const int AmortizationCost = 20;
560 static const TypeConversionCostTblEntry
561 VectorSelectTbl[] = {
562 { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
563 { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
564 { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
565 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
566 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
567 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
568 };
569
570 EVT SelCondTy = TLI->getValueType(DL, CondTy);
571 EVT SelValTy = TLI->getValueType(DL, ValTy);
572 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
573 if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
574 SelCondTy.getSimpleVT(),
575 SelValTy.getSimpleVT()))
576 return Entry->Cost;
577 }
578 }
579 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
580}
581
582int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
583 unsigned Alignment, unsigned AddressSpace,
584 const Instruction *I) {
585 auto LT = TLI->getTypeLegalizationCost(DL, Ty);
586
587 if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
588 LT.second.is128BitVector() && Alignment < 16) {
589 // Unaligned stores are extremely inefficient. We don't split all
590 // unaligned 128-bit stores because of the negative impact that has been
591 // shown in practice on inlined block copy code.
592 // We make such stores expensive so that we will only vectorize if there
593 // are 6 other instructions getting vectorized.
594 const int AmortizationCost = 6;
595
596 return LT.first * 2 * AmortizationCost;
597 }
598
599 if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
600 Ty->getVectorNumElements() < 8) {
601 // We scalarize the loads/stores because there is no v.4b register and we
602 // have to promote the elements to v.4h.
603 unsigned NumVecElts = Ty->getVectorNumElements();
604 unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
605 // We generate 2 instructions per vector element.
606 return NumVectorizableInstsToAmortize * NumVecElts * 2;
607 }
608
609 return LT.first;
610}
611
612int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
613 unsigned Factor,
614 ArrayRef<unsigned> Indices,
615 unsigned Alignment,
616 unsigned AddressSpace) {
617 assert(Factor >= 2 && "Invalid interleave factor");
618 assert(isa<VectorType>(VecTy) && "Expect a vector type");
619
620 if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
621 unsigned NumElts = VecTy->getVectorNumElements();
622 auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
623
624 // ldN/stN only support legal vector types of size 64 or 128 in bits.
625 // Accesses having vector types that are a multiple of 128 bits can be
626 // matched to more than one ldN/stN instruction.
627 if (NumElts % Factor == 0 &&
628 TLI->isLegalInterleavedAccessType(SubVecTy, DL))
629 return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
630 }
631
632 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
633 Alignment, AddressSpace);
634}
635
636int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
637 int Cost = 0;
638 for (auto *I : Tys) {
639 if (!I->isVectorTy())
640 continue;
641 if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
642 Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
643 getMemoryOpCost(Instruction::Load, I, 128, 0);
644 }
645 return Cost;
646}
647
648unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
649 return ST->getMaxInterleaveFactor();
650}
651
652// For Falkor, we want to avoid having too many strided loads in a loop since
653// that can exhaust the HW prefetcher resources. We adjust the unroller
654// MaxCount preference below to attempt to ensure unrolling doesn't create too
655// many strided loads.
656static void
657getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
658 TargetTransformInfo::UnrollingPreferences &UP) {
659 enum { MaxStridedLoads = 7 };
660 auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
661 int StridedLoads = 0;
662 // FIXME? We could make this more precise by looking at the CFG and
663 // e.g. not counting loads in each side of an if-then-else diamond.
664 for (const auto BB : L->blocks()) {
665 for (auto &I : *BB) {
666 LoadInst *LMemI = dyn_cast<LoadInst>(&I);
667 if (!LMemI)
668 continue;
669
670 Value *PtrValue = LMemI->getPointerOperand();
671 if (L->isLoopInvariant(PtrValue))
672 continue;
673
674 const SCEV *LSCEV = SE.getSCEV(PtrValue);
675 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
676 if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
677 continue;
678
679 // FIXME? We could take pairing of unrolled load copies into account
680 // by looking at the AddRec, but we would probably have to limit this
681 // to loops with no stores or other memory optimization barriers.
682 ++StridedLoads;
683 // We've seen enough strided loads that seeing more won't make a
684 // difference.
685 if (StridedLoads > MaxStridedLoads / 2)
686 return StridedLoads;
687 }
688 }
689 return StridedLoads;
690 };
691
692 int StridedLoads = countStridedLoads(L, SE);
693 DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
694 << " strided loads\n");
695 // Pick the largest power of 2 unroll count that won't result in too many
696 // strided loads.
697 if (StridedLoads) {
7
Assuming 'StridedLoads' is not equal to 0
8
Taking true branch
698 UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
9
The result of the left shift is undefined due to shifting by '4294967295', which is greater than or equal to the width of type 'int'
699 DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " << UP.MaxCount
700 << '\n');
701 }
702}
703
704void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
705 TTI::UnrollingPreferences &UP) {
706 // Enable partial unrolling and runtime unrolling.
707 BaseT::getUnrollingPreferences(L, SE, UP);
708
709 // An inner loop is more likely to be hot, and the runtime check can be
710 // promoted out by the LICM pass, so the overhead is less; let's try a
711 // larger threshold to unroll more loops.
712 if (L->getLoopDepth() > 1)
1
Assuming the condition is false
2
Taking false branch
713 UP.PartialThreshold *= 2;
714
715 // Disable partial & runtime unrolling on -Os.
716 UP.PartialOptSizeThreshold = 0;
717
718 if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
3
Assuming the condition is true
5
Taking true branch
719 EnableFalkorHWPFUnrollFix)
4
Assuming the condition is true
720 getFalkorUnrollingPreferences(L, SE, UP);
6
Calling 'getFalkorUnrollingPreferences'
721}
722
723Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
724 Type *ExpectedType) {
725 switch (Inst->getIntrinsicID()) {
726 default:
727 return nullptr;
728 case Intrinsic::aarch64_neon_st2:
729 case Intrinsic::aarch64_neon_st3:
730 case Intrinsic::aarch64_neon_st4: {
731 // Create a struct type
732 StructType *ST = dyn_cast<StructType>(ExpectedType);
733 if (!ST)
734 return nullptr;
735 unsigned NumElts = Inst->getNumArgOperands() - 1;
736 if (ST->getNumElements() != NumElts)
737 return nullptr;
738 for (unsigned i = 0, e = NumElts; i != e; ++i) {
739 if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
740 return nullptr;
741 }
742 Value *Res = UndefValue::get(ExpectedType);
743 IRBuilder<> Builder(Inst);
744 for (unsigned i = 0, e = NumElts; i != e; ++i) {
745 Value *L = Inst->getArgOperand(i);
746 Res = Builder.CreateInsertValue(Res, L, i);
747 }
748 return Res;
749 }
750 case Intrinsic::aarch64_neon_ld2:
751 case Intrinsic::aarch64_neon_ld3:
752 case Intrinsic::aarch64_neon_ld4:
753 if (Inst->getType() == ExpectedType)
754 return Inst;
755 return nullptr;
756 }
757}
758
759bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
760 MemIntrinsicInfo &Info) {
761 switch (Inst->getIntrinsicID()) {
762 default:
763 break;
764 case Intrinsic::aarch64_neon_ld2:
765 case Intrinsic::aarch64_neon_ld3:
766 case Intrinsic::aarch64_neon_ld4:
767 Info.ReadMem = true;
768 Info.WriteMem = false;
769 Info.PtrVal = Inst->getArgOperand(0);
770 break;
771 case Intrinsic::aarch64_neon_st2:
772 case Intrinsic::aarch64_neon_st3:
773 case Intrinsic::aarch64_neon_st4:
774 Info.ReadMem = false;
775 Info.WriteMem = true;
776 Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
777 break;
778 }
779
780 switch (Inst->getIntrinsicID()) {
781 default:
782 return false;
783 case Intrinsic::aarch64_neon_ld2:
784 case Intrinsic::aarch64_neon_st2:
785 Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
786 break;
787 case Intrinsic::aarch64_neon_ld3:
788 case Intrinsic::aarch64_neon_st3:
789 Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
790 break;
791 case Intrinsic::aarch64_neon_ld4:
792 case Intrinsic::aarch64_neon_st4:
793 Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
794 break;
795 }
796 return true;
797}
798
799/// See if \p I should be considered for address type promotion. We check if \p
800/// I is a sext with the right type that is used in memory accesses. If it is used in a
801/// "complex" getelementptr, we allow it to be promoted without finding other
802/// sext instructions that sign extended the same initial value. A getelementptr
803/// is considered as "complex" if it has more than 2 operands.
804bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
805 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
806 bool Considerable = false;
807 AllowPromotionWithoutCommonHeader = false;
808 if (!isa<SExtInst>(&I))
809 return false;
810 Type *ConsideredSExtType =
811 Type::getInt64Ty(I.getParent()->getParent()->getContext());
812 if (I.getType() != ConsideredSExtType)
813 return false;
814 // See if the sext is the one with the right type and used in at least one
815 // GetElementPtrInst.
816 for (const User *U : I.users()) {
817 if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
818 Considerable = true;
819 // A getelementptr is considered as "complex" if it has more than 2
820 // operands. We will promote a SExt used in such complex GEP as we
821 // expect some computation to be merged if they are done on 64 bits.
822 if (GEPInst->getNumOperands() > 2) {
823 AllowPromotionWithoutCommonHeader = true;
824 break;
825 }
826 }
827 }
828 return Considerable;
829}
830
831unsigned AArch64TTIImpl::getCacheLineSize() {
832 return ST->getCacheLineSize();
833}
834
835unsigned AArch64TTIImpl::getPrefetchDistance() {
836 return ST->getPrefetchDistance();
837}
838
839unsigned AArch64TTIImpl::getMinPrefetchStride() {
840 return ST->getMinPrefetchStride();
841}
842
843unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
844 return ST->getMaxPrefetchIterationsAhead();
845}
846
847bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
848 TTI::ReductionFlags Flags) const {
849 assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
850 unsigned ScalarBits = Ty->getScalarSizeInBits();
851 switch (Opcode) {
852 case Instruction::FAdd:
853 case Instruction::FMul:
854 case Instruction::And:
855 case Instruction::Or:
856 case Instruction::Xor:
857 case Instruction::Mul:
858 return false;
859 case Instruction::Add:
860 return ScalarBits * Ty->getVectorNumElements() >= 128;
861 case Instruction::ICmp:
862 return (ScalarBits < 64) &&
863 (ScalarBits * Ty->getVectorNumElements() >= 128);
864 case Instruction::FCmp:
865 return Flags.NoNaN;
866 default:
867 llvm_unreachable("Unhandled reduction opcode");
868 }
869 return false;
870}