Bug Summary

File: lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
Warning: line 166, column 20: Division by zero
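
The flagged expression is the 'VecWidth % RegWidth' check in HexagonTTIImpl::getMemoryOpCost. Along the annotated path below, the analyzer assumes the HVX guard at line 162 holds (step 7) while a later evaluation of useHVX() inside getMinVectorRegisterBitWidth is assumed to be false (steps 12-14), so 'RegWidth' is 0 when the modulo at line 166 executes (step 19). The shape that produces this kind of report reduces to the following sketch; every name in it is a hypothetical stand-in, not the Hexagon or LLVM API:

    // Reduced sketch of the reported shape. All names are hypothetical
    // stand-ins for the Hexagon hooks; this is an illustration, not the
    // actual TTI code.
    #include <cstdlib>

    static bool featureEnabled() {               // stands in for useHVX()
      return std::getenv("FEATURE") != nullptr;  // opaque to the analyzer
    }

    static unsigned widthOrZero() {              // stands in for getMinVectorRegisterBitWidth()
      return featureEnabled() ? 1024u : 0u;
    }

    static unsigned costSketch(unsigned VecWidth) {
      if (featureEnabled()) {                    // path assumption: true (step 7)
        unsigned RegWidth = widthOrZero();       // assumed false here, so RegWidth == 0 (steps 12-18)
        if (VecWidth % RegWidth == 0)            // the flagged modulo (step 19)
          return VecWidth / RegWidth;
      }
      return VecWidth / 8;                       // arbitrary scalar fallback for the sketch
    }

    int main() {
      // Within one execution both calls agree, so the zero divisor cannot
      // actually be reached here; the report relies on the two calls being
      // constrained independently along the analyzed path.
      return static_cast<int>(costSketch(2048u) & 0xffu);
    }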

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name HexagonTargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/Hexagon -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp -faddrsig
1//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// Hexagon target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15
16#include "HexagonTargetTransformInfo.h"
17#include "HexagonSubtarget.h"
18#include "llvm/Analysis/TargetTransformInfo.h"
19#include "llvm/CodeGen/ValueTypes.h"
20#include "llvm/IR/InstrTypes.h"
21#include "llvm/IR/Instructions.h"
22#include "llvm/IR/User.h"
23#include "llvm/Support/Casting.h"
24#include "llvm/Support/CommandLine.h"
25#include "llvm/Transforms/Utils/UnrollLoop.h"
26
27using namespace llvm;
28
29#define DEBUG_TYPE "hexagontti"
30
31static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
32 cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));
33
34static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
35 cl::init(true), cl::Hidden,
36 cl::desc("Control lookup table emission on Hexagon target"));
37
38// Constant "cost factor" to make floating point operations more expensive
39// in terms of vectorization cost. This isn't the best way, but it should
40// do. Ultimately, the cost should use cycles.
41static const unsigned FloatFactor = 4;
42
43bool HexagonTTIImpl::useHVX() const {
44 return ST.useHVXOps() && HexagonAutoHVX;
45}
46
47bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
48 assert(VecTy->isVectorTy());
49 // Avoid types like <2 x i32*>.
50 if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
51 return false;
52 EVT VecVT = EVT::getEVT(VecTy);
53 if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
54 return false;
55 if (ST.isHVXVectorType(VecVT.getSimpleVT()))
56 return true;
57 auto Action = TLI.getPreferredVectorAction(VecVT);
58 return Action == TargetLoweringBase::TypeWidenVector;
59}
60
61unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
62 if (Ty->isVectorTy())
63 return Ty->getVectorNumElements();
64 assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
65 "Expecting scalar type");
66 return 1;
67}
68
69TargetTransformInfo::PopcntSupportKind
70HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
71 // Return fast hardware support as every input < 64 bits will be promoted
72 // to 64 bits.
73 return TargetTransformInfo::PSK_FastHardware;
74}
75
76// The Hexagon target can unroll loops with run-time trip counts.
77void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
78 TTI::UnrollingPreferences &UP) {
79 UP.Runtime = UP.Partial = true;
80 // Only try to peel innermost loops with small runtime trip counts.
81 if (L && L->empty() && canPeel(L) &&
82 SE.getSmallConstantTripCount(L) == 0 &&
83 SE.getSmallConstantMaxTripCount(L) > 0 &&
84 SE.getSmallConstantMaxTripCount(L) <= 5) {
85 UP.PeelCount = 2;
86 }
87}
88
89bool HexagonTTIImpl::shouldFavorPostInc() const {
90 return true;
91}
92
93/// --- Vector TTI begin ---
94
95unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
96 if (Vector)
97 return useHVX() ? 32 : 0;
98 return 32;
99}
100
101unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
102 return useHVX() ? 2 : 0;
103}
104
105unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
106 return Vector ? getMinVectorRegisterBitWidth() : 32;
10. '?' condition is true
11. Calling 'HexagonTTIImpl::getMinVectorRegisterBitWidth'
15. Returning from 'HexagonTTIImpl::getMinVectorRegisterBitWidth'
16. Returning zero
107}
108
109unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
110 return useHVX() ? ST.getVectorLength()*8 : 0;
12. Assuming the condition is false
13. '?' condition is false
14. Returning zero
111}
112
113unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
114 return (8 * ST.getVectorLength()) / ElemWidth;
115}
116
117unsigned HexagonTTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
118 bool Extract) {
119 return BaseT::getScalarizationOverhead(Ty, Insert, Extract);
120}
121
122unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
123 ArrayRef<const Value*> Args, unsigned VF) {
124 return BaseT::getOperandsScalarizationOverhead(Args, VF);
125}
126
127unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
128 ArrayRef<Type*> Tys) {
129 return BaseT::getCallInstrCost(F, RetTy, Tys);
130}
131
132unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
133 ArrayRef<Value*> Args, FastMathFlags FMF, unsigned VF) {
134 return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
135}
136
137unsigned HexagonTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
138 ArrayRef<Type*> Tys, FastMathFlags FMF,
139 unsigned ScalarizationCostPassed) {
140 if (ID == Intrinsic::bswap) {
141 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, RetTy);
142 return LT.first + 2;
143 }
144 return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
145 ScalarizationCostPassed);
146}
147
148unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
149 ScalarEvolution *SE, const SCEV *S) {
150 return 0;
151}
152
153unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
154 unsigned Alignment, unsigned AddressSpace, const Instruction *I) {
155 assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
156 if (Opcode == Instruction::Store)
5. Taking false branch
157 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
158
159 if (Src->isVectorTy()) {
6. Taking true branch
160 VectorType *VecTy = cast<VectorType>(Src);
161 unsigned VecWidth = VecTy->getBitWidth();
162 if (useHVX() && isTypeForHVX(VecTy)) {
7. Assuming the condition is true
8. Taking true branch
163 unsigned RegWidth = getRegisterBitWidth(true);
9. Calling 'HexagonTTIImpl::getRegisterBitWidth'
17. Returning from 'HexagonTTIImpl::getRegisterBitWidth'
18. 'RegWidth' initialized to 0
164 Alignment = std::min(Alignment, RegWidth/8);
165 // Cost of HVX loads.
166 if (VecWidth % RegWidth == 0)
19. Division by zero (a possible guard is sketched after the listing)
167 return VecWidth / RegWidth;
168 // Cost of constructing HVX vector from scalar loads.
169 unsigned AlignWidth = 8 * std::max(1u, Alignment);
170 unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
171 return 3*NumLoads;
172 }
173
174 // Non-HVX vectors.
175 // Add extra cost for floating point types.
176 unsigned Cost = VecTy->getElementType()->isFloatingPointTy() ? FloatFactor
177 : 1;
178 Alignment = std::min(Alignment, 8u);
179 unsigned AlignWidth = 8 * std::max(1u, Alignment);
180 unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
181 if (Alignment == 4 || Alignment == 8)
182 return Cost * NumLoads;
183 // Loads of less than 32 bits will need extra inserts to compose a vector.
184 unsigned LogA = Log2_32(Alignment);
185 return (3 - LogA) * Cost * NumLoads;
186 }
187
188 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
189}
190
191unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
192 Type *Src, unsigned Alignment, unsigned AddressSpace) {
193 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
194}
195
196unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
197 int Index, Type *SubTp) {
198 return 1;
199}
200
201unsigned HexagonTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
202 Value *Ptr, bool VariableMask, unsigned Alignment) {
203 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
204 Alignment);
205}
206
207unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
208 Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
209 unsigned Alignment, unsigned AddressSpace, bool IsMasked) {
210 if (Indices.size() != Factor || IsMasked)
1. Assuming the condition is false
2. Assuming 'IsMasked' is 0
3. Taking false branch
211 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
212 Alignment, AddressSpace, IsMasked);
213 return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, nullptr);
4. Calling 'HexagonTTIImpl::getMemoryOpCost'
214}
215
216unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
217 Type *CondTy, const Instruction *I) {
218 if (ValTy->isVectorTy()) {
219 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
220 if (Opcode == Instruction::FCmp)
221 return LT.first + FloatFactor * getTypeNumElements(ValTy);
222 }
223 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
224}
225
226unsigned HexagonTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
227 TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
228 TTI::OperandValueProperties Opd1PropInfo,
229 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value*> Args) {
230 if (Ty->isVectorTy()) {
231 std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
232 if (LT.second.isFloatingPoint())
233 return LT.first + FloatFactor * getTypeNumElements(Ty);
234 }
235 return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
236 Opd1PropInfo, Opd2PropInfo, Args);
237}
238
239unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
240 Type *SrcTy, const Instruction *I) {
241 if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
242 unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
243 unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;
244
245 std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
246 std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
247 return std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
248 }
249 return 1;
250}
251
252unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
253 unsigned Index) {
254 Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
255 : Val;
256 if (Opcode == Instruction::InsertElement) {
257 // Need two rotations for non-zero index.
258 unsigned Cost = (Index != 0) ? 2 : 0;
259 if (ElemTy->isIntegerTy(32))
260 return Cost;
261 // If it's not a 32-bit value, there will need to be an extract.
262 return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
263 }
264
265 if (Opcode == Instruction::ExtractElement)
266 return 2;
267
268 return 1;
269}
270
271/// --- Vector TTI end ---
272
273unsigned HexagonTTIImpl::getPrefetchDistance() const {
274 return ST.getL1PrefetchDistance();
275}
276
277unsigned HexagonTTIImpl::getCacheLineSize() const {
278 return ST.getL1CacheLineSize();
279}
280
281int HexagonTTIImpl::getUserCost(const User *U,
282 ArrayRef<const Value *> Operands) {
283 auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
284 if (!CI->isIntegerCast())
285 return false;
286 // Only extensions from an integer type shorter than 32-bit to i32
287 // can be folded into the load.
288 const DataLayout &DL = getDataLayout();
289 unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
290 unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
291 if (DBW != 32 || SBW >= DBW)
292 return false;
293
294 const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
295 // Technically, this code could allow multiple uses of the load, and
296 // check if all the uses are the same extension operation, but this
297 // should be sufficient for most cases.
298 return LI && LI->hasOneUse();
299 };
300
301 if (const CastInst *CI = dyn_cast<const CastInst>(U))
302 if (isCastFoldedIntoLoad(CI))
303 return TargetTransformInfo::TCC_Free;
304 return BaseT::getUserCost(U, Operands);
305}
306
307bool HexagonTTIImpl::shouldBuildLookupTables() const {
308 return EmitLookupTables;
309}
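
If the report were to be addressed defensively in the source, the most direct option is to make the non-zero divisor explicit before the modulo at line 166. The fragment below sketches that idea against the HVX branch of getMemoryOpCost shown above; it is only an illustration, not the change made upstream.

    // Sketch only: take the HVX fast path solely when a vector register width
    // is known, otherwise fall through to the scalar-load estimate.
    unsigned RegWidth = getRegisterBitWidth(true);
    Alignment = std::min(Alignment, RegWidth / 8);
    // Cost of HVX loads.
    if (RegWidth != 0 && VecWidth % RegWidth == 0)
      return VecWidth / RegWidth;
    // Cost of constructing HVX vector from scalar loads.
    unsigned AlignWidth = 8 * std::max(1u, Alignment);
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    return 3 * NumLoads;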