Bug Summary

File: build/source/llvm/lib/Analysis/IVDescriptors.cpp
Warning: line 1191, column 8
Called C++ object pointer is null
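
The path the analyzer reports runs through RecurrenceDescriptor::getReductionOpChain (steps [1]-[10] in the annotated listing below): LoopExitInstr is taken to be a PHINode, the dyn_cast of its first incoming value is assumed to fail (so Inc0 is null), Inc0 == Phi is then assumed to hold, and the call Phi->hasNUses(...) at line 1191 therefore dereferences a null pointer. The standalone snippet below only models that shape outside of LLVM, using dynamic_cast in place of llvm::dyn_cast; the types and names are illustrative, and checking the cast result (or asserting the PHI is non-null) is shown as one way the path could be cut off, not necessarily the change made upstream.

  // Self-contained model of the flagged pattern (illustrative, not LLVM code).
  #include <iostream>

  struct Value { virtual ~Value() = default; };
  struct Instruction : Value {
    unsigned Uses = 2;
    bool hasNUses(unsigned N) const { return Uses == N; } // dereferences 'this'
  };
  struct PHINode : Instruction {};

  // Stand-in for llvm::dyn_cast<Instruction>: null when the downcast fails.
  static Instruction *dynCastInstruction(Value *V) {
    return dynamic_cast<Instruction *>(V);
  }

  static bool checkChain(PHINode *Phi, Value *Incoming) {
    Instruction *Inc = dynCastInstruction(Incoming); // may be null (step [5])
    if (Inc == Phi) {
      // If Inc is null and this branch is taken, Phi is null as well, and the
      // next call is the "Called C++ object pointer is null" dereference. A
      // guard such as `if (!Inc) return false;` above, or asserting that Phi
      // is non-null at entry, removes the path.
      return Phi->hasNUses(3);
    }
    return false;
  }

  int main() {
    PHINode Phi;
    Value NotAnInstruction;
    std::cout << checkChain(&Phi, &NotAnInstruction) << '\n'; // prints 0
    return 0;
  }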

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name IVDescriptors.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-17/lib/clang/17 -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/source/llvm/lib/Analysis -I include -I /build/source/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1683717183 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/llvm/lib/Analysis/IVDescriptors.cpp
1//===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file "describes" induction and recurrence variables.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Analysis/IVDescriptors.h"
14#include "llvm/Analysis/DemandedBits.h"
15#include "llvm/Analysis/LoopInfo.h"
16#include "llvm/Analysis/ScalarEvolution.h"
17#include "llvm/Analysis/ScalarEvolutionExpressions.h"
18#include "llvm/Analysis/ValueTracking.h"
19#include "llvm/IR/Dominators.h"
20#include "llvm/IR/Instructions.h"
21#include "llvm/IR/Module.h"
22#include "llvm/IR/PatternMatch.h"
23#include "llvm/IR/ValueHandle.h"
24#include "llvm/Support/Debug.h"
25#include "llvm/Support/KnownBits.h"
26
27#include <set>
28
29using namespace llvm;
30using namespace llvm::PatternMatch;
31
32#define DEBUG_TYPE "iv-descriptors"
33
34bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
35 SmallPtrSetImpl<Instruction *> &Set) {
36 for (const Use &Use : I->operands())
37 if (!Set.count(dyn_cast<Instruction>(Use)))
38 return false;
39 return true;
40}
41
42bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
43 switch (Kind) {
44 default:
45 break;
46 case RecurKind::Add:
47 case RecurKind::Mul:
48 case RecurKind::Or:
49 case RecurKind::And:
50 case RecurKind::Xor:
51 case RecurKind::SMax:
52 case RecurKind::SMin:
53 case RecurKind::UMax:
54 case RecurKind::UMin:
55 case RecurKind::SelectICmp:
56 case RecurKind::SelectFCmp:
57 return true;
58 }
59 return false;
60}
61
62bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
63 return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
64}
65
66/// Determines if Phi may have been type-promoted. If Phi has a single user
67/// that ANDs the Phi with a type mask, return the user. RT is updated to
68/// account for the narrower bit width represented by the mask, and the AND
69/// instruction is added to CI.
70static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
71 SmallPtrSetImpl<Instruction *> &Visited,
72 SmallPtrSetImpl<Instruction *> &CI) {
73 if (!Phi->hasOneUse())
74 return Phi;
75
76 const APInt *M = nullptr;
77 Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());
78
79 // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
80 // with a new integer type of the corresponding bit width.
81 if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
82 int32_t Bits = (*M + 1).exactLogBase2();
83 if (Bits > 0) {
84 RT = IntegerType::get(Phi->getContext(), Bits);
85 Visited.insert(Phi);
86 CI.insert(J);
87 return J;
88 }
89 }
90 return Phi;
91}
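
As a side note on the mask test above: the narrowed width comes from (*M + 1).exactLogBase2(), which only succeeds when the mask has the form 2^x-1. The snippet below exercises just that arithmetic; it assumes the LLVM ADT headers are available (e.g. a build against an LLVM install) and is not part of IVDescriptors.cpp.

  // Illustration of the mask check in lookThroughAnd: only 2^x-1 masks narrow RT.
  #include "llvm/ADT/APInt.h"
  #include <cstdio>

  int main() {
    llvm::APInt Mask255(32, 255); // e.g. "and i32 %phi, 255" -> 8-bit recurrence
    llvm::APInt Mask250(32, 250); // not of the form 2^x-1   -> rejected
    std::printf("%d\n", (int)(Mask255 + 1).exactLogBase2()); // prints 8
    std::printf("%d\n", (int)(Mask250 + 1).exactLogBase2()); // prints -1 (no match)
    return 0;
  }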
92
93/// Compute the minimal bit width needed to represent a reduction whose exit
94/// instruction is given by Exit.
95static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
96 DemandedBits *DB,
97 AssumptionCache *AC,
98 DominatorTree *DT) {
99 bool IsSigned = false;
100 const DataLayout &DL = Exit->getModule()->getDataLayout();
101 uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());
102
103 if (DB) {
104 // Use the demanded bits analysis to determine the bits that are live out
105 // of the exit instruction, rounding up to the nearest power of two. If the
106 // use of demanded bits results in a smaller bit width, we know the value
107 // must be positive (i.e., IsSigned = false), because if this were not the
108 // case, the sign bit would have been demanded.
109 auto Mask = DB->getDemandedBits(Exit);
110 MaxBitWidth = Mask.getBitWidth() - Mask.countl_zero();
111 }
112
113 if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
114 // If demanded bits wasn't able to limit the bit width, we can try to use
115 // value tracking instead. This can be the case, for example, if the value
116 // may be negative.
117 auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
118 auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
119 MaxBitWidth = NumTypeBits - NumSignBits;
120 KnownBits Bits = computeKnownBits(Exit, DL);
121 if (!Bits.isNonNegative()) {
122 // If the value is not known to be non-negative, we set IsSigned to true,
123 // meaning that we will use sext instructions instead of zext
124 // instructions to restore the original type.
125 IsSigned = true;
126 // Make sure at least one sign bit is included in the result, so it
127 // will get properly sign-extended.
128 ++MaxBitWidth;
129 }
130 }
131 MaxBitWidth = llvm::bit_ceil(MaxBitWidth);
132
133 return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
134 IsSigned);
135}
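
For a concrete feel of the computation above, the arithmetic can be run in isolation. The numbers below are made up for illustration (a 32-bit exit value): in the unsigned case DemandedBits leaves 6 live bits, and in the signed case ComputeNumSignBits reports 24 sign bits; llvm::bit_ceil then rounds each width up to the next power of two, matching what computeRecurrenceType would return.

  // Worked example of computeRecurrenceType's width math (illustrative values only).
  #include "llvm/ADT/bit.h"
  #include <cstdint>
  #include <cstdio>

  int main() {
    // Unsigned case: demanded-bits mask of a 32-bit value has 26 leading zeros.
    uint64_t UnsignedWidth = 32 - 26;                   // getBitWidth() - countl_zero()
    std::printf("i%u, zext\n", (unsigned)llvm::bit_ceil(UnsignedWidth)); // i8, zext

    // Signed case: 24 sign bits, value not known non-negative, so keep a sign bit.
    uint64_t SignedWidth = (32 - 24) + 1;               // NumTypeBits - NumSignBits, then ++
    std::printf("i%u, sext\n", (unsigned)llvm::bit_ceil(SignedWidth));   // i16, sext
    return 0;
  }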
136
137/// Collect cast instructions that can be ignored in the vectorizer's cost
138/// model, given a reduction exit value and the minimal type in which the
139// reduction can be represented. Also search casts to the recurrence type
140// to find the minimum width used by the recurrence.
141static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
142 Type *RecurrenceType,
143 SmallPtrSetImpl<Instruction *> &Casts,
144 unsigned &MinWidthCastToRecurTy) {
145
146 SmallVector<Instruction *, 8> Worklist;
147 SmallPtrSet<Instruction *, 8> Visited;
148 Worklist.push_back(Exit);
149 MinWidthCastToRecurTy = -1U;
150
151 while (!Worklist.empty()) {
152 Instruction *Val = Worklist.pop_back_val();
153 Visited.insert(Val);
154 if (auto *Cast = dyn_cast<CastInst>(Val)) {
155 if (Cast->getSrcTy() == RecurrenceType) {
156 // If the source type of a cast instruction is equal to the recurrence
157 // type, it will be eliminated, and should be ignored in the vectorizer
158 // cost model.
159 Casts.insert(Cast);
160 continue;
161 }
162 if (Cast->getDestTy() == RecurrenceType) {
163 // The minimum width used by the recurrence is found by checking for
164 // casts on its operands. The minimum width is used by the vectorizer
165 // when finding the widest type for in-loop reductions without any
166 // loads/stores.
167 MinWidthCastToRecurTy = std::min<unsigned>(
168 MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
169 continue;
170 }
171 }
172 // Add all operands to the work list if they are loop-varying values that
173 // we haven't yet visited.
174 for (Value *O : cast<User>(Val)->operands())
175 if (auto *I = dyn_cast<Instruction>(O))
176 if (TheLoop->contains(I) && !Visited.count(I))
177 Worklist.push_back(I);
178 }
179}
180
181// Check if a given Phi node can be recognized as an ordered reduction for
182// vectorizing floating point operations without unsafe math.
183static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
184 Instruction *Exit, PHINode *Phi) {
185 // Currently only FAdd and FMulAdd are supported.
186 if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
187 return false;
188
189 if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
190 return false;
191
192 if (Kind == RecurKind::FMulAdd &&
193 !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
194 return false;
195
196 // Ensure the exit instruction has only one user other than the reduction PHI
197 if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
198 return false;
199
200 // The only pattern accepted is the one in which the reduction PHI
201 // is used as one of the operands of the exit instruction
202 auto *Op0 = Exit->getOperand(0);
203 auto *Op1 = Exit->getOperand(1);
204 if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
205 return false;
206 if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
207 return false;
208
209 LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
210 << ", ExitInst: " << *Exit << "\n");
211
212 return true;
213}
214
215bool RecurrenceDescriptor::AddReductionVar(
216 PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
217 RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
218 DominatorTree *DT, ScalarEvolution *SE) {
219 if (Phi->getNumIncomingValues() != 2)
220 return false;
221
222 // Reduction variables are only found in the loop header block.
223 if (Phi->getParent() != TheLoop->getHeader())
224 return false;
225
226 // Obtain the reduction start value from the value that comes from the loop
227 // preheader.
228 Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());
229
230 // ExitInstruction is the single value which is used outside the loop.
231 // We only allow for a single reduction value to be used outside the loop.
232 // This includes users of the reduction, variables (which form a cycle
233 // which ends in the phi node).
234 Instruction *ExitInstruction = nullptr;
235
236 // Variable to keep the last visited store instruction. By the end of the
237 // algorithm this variable will either be empty or hold the intermediate
238 // reduction value stored in an invariant address.
239 StoreInst *IntermediateStore = nullptr;
240
241 // Indicates that we found a reduction operation in our scan.
242 bool FoundReduxOp = false;
243
244 // We start with the PHI node and scan for all of the users of this
245 // instruction. All users must be instructions that can be used as reduction
246 // variables (such as ADD). We must have a single out-of-block user. The cycle
247 // must include the original PHI.
248 bool FoundStartPHI = false;
249
250 // To recognize min/max patterns formed by a icmp select sequence, we store
251 // the number of instruction we saw from the recognized min/max pattern,
252 // to make sure we only see exactly the two instructions.
253 unsigned NumCmpSelectPatternInst = 0;
254 InstDesc ReduxDesc(false, nullptr);
255
256 // Data used for determining if the recurrence has been type-promoted.
257 Type *RecurrenceType = Phi->getType();
258 SmallPtrSet<Instruction *, 4> CastInsts;
259 unsigned MinWidthCastToRecurrenceType;
260 Instruction *Start = Phi;
261 bool IsSigned = false;
262
263 SmallPtrSet<Instruction *, 8> VisitedInsts;
264 SmallVector<Instruction *, 8> Worklist;
265
266 // Return early if the recurrence kind does not match the type of Phi. If the
267 // recurrence kind is arithmetic, we attempt to look through AND operations
268 // resulting from the type promotion performed by InstCombine. Vector
269 // operations are not limited to the legal integer widths, so we may be able
270 // to evaluate the reduction in the narrower width.
271 if (RecurrenceType->isFloatingPointTy()) {
272 if (!isFloatingPointRecurrenceKind(Kind))
273 return false;
274 } else if (RecurrenceType->isIntegerTy()) {
275 if (!isIntegerRecurrenceKind(Kind))
276 return false;
277 if (!isMinMaxRecurrenceKind(Kind))
278 Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
279 } else {
280 // Pointer min/max may exist, but it is not supported as a reduction op.
281 return false;
282 }
283
284 Worklist.push_back(Start);
285 VisitedInsts.insert(Start);
286
287 // Start with all flags set because we will intersect this with the reduction
288 // flags from all the reduction operations.
289 FastMathFlags FMF = FastMathFlags::getFast();
290
291 // The first instruction in the use-def chain of the Phi node that requires
292 // exact floating point operations.
293 Instruction *ExactFPMathInst = nullptr;
294
295 // A value in the reduction can be used:
296 // - By the reduction:
297 // - Reduction operation:
298 // - One use of reduction value (safe).
299 // - Multiple use of reduction value (not safe).
300 // - PHI:
301 // - All uses of the PHI must be the reduction (safe).
302 // - Otherwise, not safe.
303 // - By instructions outside of the loop (safe).
304 // * One value may have several outside users, but all outside
305 // uses must be of the same value.
306 // - By store instructions with a loop invariant address (safe with
307 // the following restrictions):
308 // * If there are several stores, all must have the same address.
309 // * Final value should be stored in that loop invariant address.
310 // - By an instruction that is not part of the reduction (not safe).
311 // This is either:
312 // * An instruction type other than PHI or the reduction operation.
313 // * A PHI in the header other than the initial PHI.
314 while (!Worklist.empty()) {
315 Instruction *Cur = Worklist.pop_back_val();
316
317 // Store instructions are allowed iff it is the store of the reduction
318 // value to the same loop invariant memory location.
319 if (auto *SI = dyn_cast<StoreInst>(Cur)) {
320 if (!SE) {
321 LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
322 << "Scalar Evolution Analysis\n");
323 return false;
324 }
325
326 const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
327 // Check it is the same address as previous stores
328 if (IntermediateStore) {
329 const SCEV *OtherScev =
330 SE->getSCEV(IntermediateStore->getPointerOperand());
331
332 if (OtherScev != PtrScev) {
333 LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
334 << "inside the loop: " << *SI->getPointerOperand()
335 << " and "
336 << *IntermediateStore->getPointerOperand() << '\n');
337 return false;
338 }
339 }
340
341 // Check the pointer is loop invariant
342 if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
343 LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
344 << "inside the loop: " << *SI->getPointerOperand()
345 << '\n');
346 return false;
347 }
348
349 // IntermediateStore is always the last store in the loop.
350 IntermediateStore = SI;
351 continue;
352 }
353
354 // No Users.
355 // If the instruction has no users then this is a broken chain and can't be
356 // a reduction variable.
357 if (Cur->use_empty())
358 return false;
359
360 bool IsAPhi = isa<PHINode>(Cur);
361
362 // A header PHI use other than the original PHI.
363 if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
364 return false;
365
366 // Reductions of instructions such as Div and Sub are only possible if the
367 // LHS is the reduction variable.
368 if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
369 !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
370 !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
371 return false;
372
373 // Any reduction instruction must be of one of the allowed kinds. We ignore
374 // the starting value (the Phi or an AND instruction if the Phi has been
375 // type-promoted).
376 if (Cur != Start) {
377 ReduxDesc =
378 isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
379 ExactFPMathInst = ExactFPMathInst == nullptr
380 ? ReduxDesc.getExactFPMathInst()
381 : ExactFPMathInst;
382 if (!ReduxDesc.isRecurrence())
383 return false;
384 // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
385 if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
386 FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
387 if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
388 // Accept FMF on either fcmp or select of a min/max idiom.
389 // TODO: This is a hack to work-around the fact that FMF may not be
390 // assigned/propagated correctly. If that problem is fixed or we
391 // standardize on fmin/fmax via intrinsics, this can be removed.
392 if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
393 CurFMF |= FCmp->getFastMathFlags();
394 }
395 FMF &= CurFMF;
396 }
397 // Update this reduction kind if we matched a new instruction.
398 // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
399 // state accurate while processing the worklist?
400 if (ReduxDesc.getRecKind() != RecurKind::None)
401 Kind = ReduxDesc.getRecKind();
402 }
403
404 bool IsASelect = isa<SelectInst>(Cur);
405
406 // A conditional reduction operation must only have 2 or fewer uses in
407 // VisitedInsts.
408 if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
409 hasMultipleUsesOf(Cur, VisitedInsts, 2))
410 return false;
411
412 // A reduction operation must only have one use of the reduction value.
413 if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
414 !isSelectCmpRecurrenceKind(Kind) &&
415 hasMultipleUsesOf(Cur, VisitedInsts, 1))
416 return false;
417
418 // All inputs to a PHI node must be a reduction value.
419 if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
420 return false;
421
422 if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectICmp) &&
423 (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
424 ++NumCmpSelectPatternInst;
425 if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectFCmp) &&
426 (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
427 ++NumCmpSelectPatternInst;
428
429 // Check whether we found a reduction operator.
430 FoundReduxOp |= !IsAPhi && Cur != Start;
431
432 // Process users of current instruction. Push non-PHI nodes after PHI nodes
433 // onto the stack. This way we are going to have seen all inputs to PHI
434 // nodes once we get to them.
435 SmallVector<Instruction *, 8> NonPHIs;
436 SmallVector<Instruction *, 8> PHIs;
437 for (User *U : Cur->users()) {
438 Instruction *UI = cast<Instruction>(U);
439
440 // If the user is a call to llvm.fmuladd then the instruction can only be
441 // the final operand.
442 if (isFMulAddIntrinsic(UI))
443 if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
444 return false;
445
446 // Check if we found the exit user.
447 BasicBlock *Parent = UI->getParent();
448 if (!TheLoop->contains(Parent)) {
449 // If we already know this instruction is used externally, move on to
450 // the next user.
451 if (ExitInstruction == Cur)
452 continue;
453
454 // Exit if you find multiple values used outside or if the header phi
455 // node is being used. In this case the user uses the value of the
456 // previous iteration, in which case we would lose "VF-1" iterations of
457 // the reduction operation if we vectorize.
458 if (ExitInstruction != nullptr || Cur == Phi)
459 return false;
460
461 // The instruction used by an outside user must be the last instruction
462 // before we feed back to the reduction phi. Otherwise, we lose VF-1
463 // operations on the value.
464 if (!is_contained(Phi->operands(), Cur))
465 return false;
466
467 ExitInstruction = Cur;
468 continue;
469 }
470
471 // Process instructions only once (termination). Each reduction cycle
472 // value must only be used once, except by phi nodes and min/max
473 // reductions which are represented as a cmp followed by a select.
474 InstDesc IgnoredVal(false, nullptr);
475 if (VisitedInsts.insert(UI).second) {
476 if (isa<PHINode>(UI)) {
477 PHIs.push_back(UI);
478 } else {
479 StoreInst *SI = dyn_cast<StoreInst>(UI);
480 if (SI && SI->getPointerOperand() == Cur) {
481 // Reduction variable chain can only be stored somewhere but it
482 // can't be used as an address.
483 return false;
484 }
485 NonPHIs.push_back(UI);
486 }
487 } else if (!isa<PHINode>(UI) &&
488 ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
489 !isa<SelectInst>(UI)) ||
490 (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
491 !isSelectCmpPattern(TheLoop, Phi, UI, IgnoredVal)
492 .isRecurrence() &&
493 !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
494 return false;
495
496 // Remember that we completed the cycle.
497 if (UI == Phi)
498 FoundStartPHI = true;
499 }
500 Worklist.append(PHIs.begin(), PHIs.end());
501 Worklist.append(NonPHIs.begin(), NonPHIs.end());
502 }
503
504 // This means we have seen one but not the other instruction of the
505 // pattern or more than just a select and cmp. Zero implies that we saw a
506 // llvm.min/max intrinsic, which is always OK.
507 if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
508 NumCmpSelectPatternInst != 0)
509 return false;
510
511 if (isSelectCmpRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
512 return false;
513
514 if (IntermediateStore) {
515 // Check that stored value goes to the phi node again. This way we make sure
516 // that the value stored in IntermediateStore is indeed the final reduction
517 // value.
518 if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
519 LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
520 << *IntermediateStore << '\n');
521 return false;
522 }
523
524 // If there is an exit instruction, its value should be stored in
525 // IntermediateStore
526 if (ExitInstruction &&
527 IntermediateStore->getValueOperand() != ExitInstruction) {
528 LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
529 "store last calculated value of the reduction: "
530 << *IntermediateStore << '\n');
531 return false;
532 }
533
534 // If all uses are inside the loop (intermediate stores), then the
535 // reduction value after the loop will be the one used in the last store.
536 if (!ExitInstruction)
537 ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
538 }
539
540 if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
541 return false;
542
543 const bool IsOrdered =
544 checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);
545
546 if (Start != Phi) {
547 // If the starting value is not the same as the phi node, we speculatively
548 // looked through an 'and' instruction when evaluating a potential
549 // arithmetic reduction to determine if it may have been type-promoted.
550 //
551 // We now compute the minimal bit width that is required to represent the
552 // reduction. If this is the same width that was indicated by the 'and', we
553 // can represent the reduction in the smaller type. The 'and' instruction
554 // will be eliminated since it will essentially be a cast instruction that
555 // can be ignored in the cost model. If we compute a different type than we
556 // did when evaluating the 'and', the 'and' will not be eliminated, and we
557 // will end up with different kinds of operations in the recurrence
558 // expression (e.g., IntegerAND, IntegerADD). We give up if this is
559 // the case.
560 //
561 // The vectorizer relies on InstCombine to perform the actual
562 // type-shrinking. It does this by inserting instructions to truncate the
563 // exit value of the reduction to the width indicated by RecurrenceType and
564 // then extend this value back to the original width. If IsSigned is false,
565 // a 'zext' instruction will be generated; otherwise, a 'sext' will be
566 // used.
567 //
568 // TODO: We should not rely on InstCombine to rewrite the reduction in the
569 // smaller type. We should just generate a correctly typed expression
570 // to begin with.
571 Type *ComputedType;
572 std::tie(ComputedType, IsSigned) =
573 computeRecurrenceType(ExitInstruction, DB, AC, DT);
574 if (ComputedType != RecurrenceType)
575 return false;
576 }
577
578 // Collect cast instructions and the minimum width used by the recurrence.
579 // If the starting value is not the same as the phi node and the computed
580 // recurrence type is equal to the recurrence type, the recurrence expression
581 // will be represented in a narrower or wider type. If there are any cast
582 // instructions that will be unnecessary, collect them in CastsFromRecurTy.
583 // Note that the 'and' instruction was already included in this list.
584 //
585 // TODO: A better way to represent this may be to tag in some way all the
586 // instructions that are a part of the reduction. The vectorizer cost
587 // model could then apply the recurrence type to these instructions,
588 // without needing a white list of instructions to ignore.
589 // This may also be useful for the inloop reductions, if it can be
590 // kept simple enough.
591 collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
592 MinWidthCastToRecurrenceType);
593
594 // We found a reduction var if we have reached the original phi node and we
595 // only have a single instruction with out-of-loop users.
596
597 // The ExitInstruction(Instruction which is allowed to have out-of-loop users)
598 // is saved as part of the RecurrenceDescriptor.
599
600 // Save the description of this reduction variable.
601 RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
602 FMF, ExactFPMathInst, RecurrenceType, IsSigned,
603 IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
604 RedDes = RD;
605
606 return true;
607}
608
609// We are looking for loops that do something like this:
610// int r = 0;
611// for (int i = 0; i < n; i++) {
612// if (src[i] > 3)
613// r = 3;
614// }
615// where the reduction value (r) only has two states, in this example 0 or 3.
616// The generated LLVM IR for this type of loop will be like this:
617// for.body:
618// %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
619// ...
620// %cmp = icmp sgt i32 %5, 3
621// %spec.select = select i1 %cmp, i32 3, i32 %r
622// ...
623// In general we can support vectorization of loops where 'r' flips between
624// any two non-constants, provided they are loop invariant. The only thing
625// we actually care about at the end of the loop is whether or not any lane
626// in the selected vector is different from the start value. The final
627// across-vector reduction after the loop simply involves choosing the start
628// value if nothing changed (0 in the example above) or the other selected
629// value (3 in the example above).
630RecurrenceDescriptor::InstDesc
631RecurrenceDescriptor::isSelectCmpPattern(Loop *Loop, PHINode *OrigPhi,
632 Instruction *I, InstDesc &Prev) {
633 // We must handle the select(cmp(),x,y) as a single instruction. Advance to
634 // the select.
635 CmpInst::Predicate Pred;
636 if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
637 if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
638 return InstDesc(Select, Prev.getRecKind());
639 }
640
641 // Only match select with single use cmp condition.
642 if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
643 m_Value())))
644 return InstDesc(false, I);
645
646 SelectInst *SI = cast<SelectInst>(I);
647 Value *NonPhi = nullptr;
648
649 if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
650 NonPhi = SI->getFalseValue();
651 else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
652 NonPhi = SI->getTrueValue();
653 else
654 return InstDesc(false, I);
655
656 // We are looking for selects of the form:
657 // select(cmp(), phi, loop_invariant) or
658 // select(cmp(), loop_invariant, phi)
659 if (!Loop->isLoopInvariant(NonPhi))
660 return InstDesc(false, I);
661
662 return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::SelectICmp
663 : RecurKind::SelectFCmp);
664}
665
666RecurrenceDescriptor::InstDesc
667RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
668 const InstDesc &Prev) {
669 assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
670 "Expected a cmp or select or call instruction");
671 if (!isMinMaxRecurrenceKind(Kind))
672 return InstDesc(false, I);
673
674 // We must handle the select(cmp()) as a single instruction. Advance to the
675 // select.
676 CmpInst::Predicate Pred;
677 if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
678 if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
679 return InstDesc(Select, Prev.getRecKind());
680 }
681
682 // Only match select with single use cmp condition, or a min/max intrinsic.
683 if (!isa<IntrinsicInst>(I) &&
684 !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
685 m_Value())))
686 return InstDesc(false, I);
687
688 // Look for a min/max pattern.
689 if (match(I, m_UMin(m_Value(), m_Value())))
690 return InstDesc(Kind == RecurKind::UMin, I);
691 if (match(I, m_UMax(m_Value(), m_Value())))
692 return InstDesc(Kind == RecurKind::UMax, I);
693 if (match(I, m_SMax(m_Value(), m_Value())))
694 return InstDesc(Kind == RecurKind::SMax, I);
695 if (match(I, m_SMin(m_Value(), m_Value())))
696 return InstDesc(Kind == RecurKind::SMin, I);
697 if (match(I, m_OrdFMin(m_Value(), m_Value())))
698 return InstDesc(Kind == RecurKind::FMin, I);
699 if (match(I, m_OrdFMax(m_Value(), m_Value())))
700 return InstDesc(Kind == RecurKind::FMax, I);
701 if (match(I, m_UnordFMin(m_Value(), m_Value())))
702 return InstDesc(Kind == RecurKind::FMin, I);
703 if (match(I, m_UnordFMax(m_Value(), m_Value())))
704 return InstDesc(Kind == RecurKind::FMax, I);
705 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
706 return InstDesc(Kind == RecurKind::FMin, I);
707 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
708 return InstDesc(Kind == RecurKind::FMax, I);
709
710 return InstDesc(false, I);
711}
712
713/// Returns true if the select instruction has users in the compare-and-add
714/// reduction pattern below. The select instruction argument is the last one
715/// in the sequence.
716///
717/// %sum.1 = phi ...
718/// ...
719/// %cmp = fcmp pred %0, %CFP
720/// %add = fadd %0, %sum.1
721/// %sum.2 = select %cmp, %add, %sum.1
722RecurrenceDescriptor::InstDesc
723RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
724 SelectInst *SI = dyn_cast<SelectInst>(I);
725 if (!SI)
726 return InstDesc(false, I);
727
728 CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
729 // Only handle single use cases for now.
730 if (!CI || !CI->hasOneUse())
731 return InstDesc(false, I);
732
733 Value *TrueVal = SI->getTrueValue();
734 Value *FalseVal = SI->getFalseValue();
735 // Handle only when either of operands of select instruction is a PHI
736 // node for now.
737 if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
738 (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
739 return InstDesc(false, I);
740
741 Instruction *I1 =
742 isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
743 : dyn_cast<Instruction>(TrueVal);
744 if (!I1 || !I1->isBinaryOp())
745 return InstDesc(false, I);
746
747 Value *Op1, *Op2;
748 if (!(((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1) ||
749 m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
750 I1->isFast()) ||
751 (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast())) ||
752 ((m_Add(m_Value(Op1), m_Value(Op2)).match(I1) ||
753 m_Sub(m_Value(Op1), m_Value(Op2)).match(I1))) ||
754 (m_Mul(m_Value(Op1), m_Value(Op2)).match(I1))))
755 return InstDesc(false, I);
756
757 Instruction *IPhi = isa<PHINode>(*Op1) ? dyn_cast<Instruction>(Op1)
758 : dyn_cast<Instruction>(Op2);
759 if (!IPhi || IPhi != FalseVal)
760 return InstDesc(false, I);
761
762 return InstDesc(true, SI);
763}
764
765RecurrenceDescriptor::InstDesc
766RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
767 Instruction *I, RecurKind Kind,
768 InstDesc &Prev, FastMathFlags FuncFMF) {
769 assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
770 switch (I->getOpcode()) {
771 default:
772 return InstDesc(false, I);
773 case Instruction::PHI:
774 return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
775 case Instruction::Sub:
776 case Instruction::Add:
777 return InstDesc(Kind == RecurKind::Add, I);
778 case Instruction::Mul:
779 return InstDesc(Kind == RecurKind::Mul, I);
780 case Instruction::And:
781 return InstDesc(Kind == RecurKind::And, I);
782 case Instruction::Or:
783 return InstDesc(Kind == RecurKind::Or, I);
784 case Instruction::Xor:
785 return InstDesc(Kind == RecurKind::Xor, I);
786 case Instruction::FDiv:
787 case Instruction::FMul:
788 return InstDesc(Kind == RecurKind::FMul, I,
789 I->hasAllowReassoc() ? nullptr : I);
790 case Instruction::FSub:
791 case Instruction::FAdd:
792 return InstDesc(Kind == RecurKind::FAdd, I,
793 I->hasAllowReassoc() ? nullptr : I);
794 case Instruction::Select:
795 if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul ||
796 Kind == RecurKind::Add || Kind == RecurKind::Mul)
797 return isConditionalRdxPattern(Kind, I);
798 [[fallthrough]];
799 case Instruction::FCmp:
800 case Instruction::ICmp:
801 case Instruction::Call:
802 if (isSelectCmpRecurrenceKind(Kind))
803 return isSelectCmpPattern(L, OrigPhi, I, Prev);
804 if (isIntMinMaxRecurrenceKind(Kind) ||
805 (((FuncFMF.noNaNs() && FuncFMF.noSignedZeros()) ||
806 (isa<FPMathOperator>(I) && I->hasNoNaNs() &&
807 I->hasNoSignedZeros())) &&
808 isFPMinMaxRecurrenceKind(Kind)))
809 return isMinMaxPattern(I, Kind, Prev);
810 else if (isFMulAddIntrinsic(I))
811 return InstDesc(Kind == RecurKind::FMulAdd, I,
812 I->hasAllowReassoc() ? nullptr : I);
813 return InstDesc(false, I);
814 }
815}
816
817bool RecurrenceDescriptor::hasMultipleUsesOf(
818 Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
819 unsigned MaxNumUses) {
820 unsigned NumUses = 0;
821 for (const Use &U : I->operands()) {
822 if (Insts.count(dyn_cast<Instruction>(U)))
823 ++NumUses;
824 if (NumUses > MaxNumUses)
825 return true;
826 }
827
828 return false;
829}
830
831bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
832 RecurrenceDescriptor &RedDes,
833 DemandedBits *DB, AssumptionCache *AC,
834 DominatorTree *DT,
835 ScalarEvolution *SE) {
836 BasicBlock *Header = TheLoop->getHeader();
837 Function &F = *Header->getParent();
838 FastMathFlags FMF;
839 FMF.setNoNaNs(
840 F.getFnAttribute("no-nans-fp-math").getValueAsBool());
841 FMF.setNoSignedZeros(
842 F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
843
844 if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
845 SE)) {
846 LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
847 return true;
848 }
849 if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
850 SE)) {
851 LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
852 return true;
853 }
854 if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
855 SE)) {
856 LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
857 return true;
858 }
859 if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
860 SE)) {
861 LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
862 return true;
863 }
864 if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
865 SE)) {
866 LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
867 return true;
868 }
869 if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
870 SE)) {
871 LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
872 return true;
873 }
874 if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
875 SE)) {
876 LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
877 return true;
878 }
879 if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
880 SE)) {
881 LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
882 return true;
883 }
884 if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
885 SE)) {
886 LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
887 return true;
888 }
889 if (AddReductionVar(Phi, RecurKind::SelectICmp, TheLoop, FMF, RedDes, DB, AC,
890 DT, SE)) {
891 LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
892 << *Phi << "\n");
893 return true;
894 }
895 if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
896 SE)) {
897 LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
898 return true;
899 }
900 if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
901 SE)) {
902 LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
903 return true;
904 }
905 if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
906 SE)) {
907 LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
908 return true;
909 }
910 if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
911 SE)) {
912 LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
913 return true;
914 }
915 if (AddReductionVar(Phi, RecurKind::SelectFCmp, TheLoop, FMF, RedDes, DB, AC,
916 DT, SE)) {
917 LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
918 << " PHI." << *Phi << "\n");
919 return true;
920 }
921 if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
922 SE)) {
923 LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
924 return true;
925 }
926 // Not a reduction of known type.
927 return false;
928}
929
930bool RecurrenceDescriptor::isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
931 DominatorTree *DT) {
932
933 // Ensure the phi node is in the loop header and has two incoming values.
934 if (Phi->getParent() != TheLoop->getHeader() ||
935 Phi->getNumIncomingValues() != 2)
936 return false;
937
938 // Ensure the loop has a preheader and a single latch block. The loop
939 // vectorizer will need the latch to set up the next iteration of the loop.
940 auto *Preheader = TheLoop->getLoopPreheader();
941 auto *Latch = TheLoop->getLoopLatch();
942 if (!Preheader || !Latch)
943 return false;
944
945 // Ensure the phi node's incoming blocks are the loop preheader and latch.
946 if (Phi->getBasicBlockIndex(Preheader) < 0 ||
947 Phi->getBasicBlockIndex(Latch) < 0)
948 return false;
949
950 // Get the previous value. The previous value comes from the latch edge while
951 // the initial value comes from the preheader edge.
952 auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
953
954 // If Previous is a phi in the header, go through incoming values from the
955 // latch until we find a non-phi value. Use this as the new Previous, all uses
956 // in the header will be dominated by the original phi, but need to be moved
957 // after the non-phi previous value.
958 SmallPtrSet<PHINode *, 4> SeenPhis;
959 while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Previous)) {
960 if (PrevPhi->getParent() != Phi->getParent())
961 return false;
962 if (!SeenPhis.insert(PrevPhi).second)
963 return false;
964 Previous = dyn_cast<Instruction>(PrevPhi->getIncomingValueForBlock(Latch));
965 }
966
967 if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous))
968 return false;
969
970 // Ensure every user of the phi node (recursively) is dominated by the
971 // previous value. The dominance requirement ensures the loop vectorizer will
972 // not need to vectorize the initial value prior to the first iteration of the
973 // loop.
974 // TODO: Consider extending this sinking to handle memory instructions.
975
976 SmallPtrSet<Value *, 8> Seen;
977 BasicBlock *PhiBB = Phi->getParent();
978 SmallVector<Instruction *, 8> WorkList;
979 auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
980 // Cyclic dependence.
981 if (Previous == SinkCandidate)
982 return false;
983
984 if (!Seen.insert(SinkCandidate).second)
985 return true;
986 if (DT->dominates(Previous,
987 SinkCandidate)) // We already are good w/o sinking.
988 return true;
989
990 if (SinkCandidate->getParent() != PhiBB ||
991 SinkCandidate->mayHaveSideEffects() ||
992 SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
993 return false;
994
995 // If we reach a PHI node that is not dominated by Previous, we reached a
996 // header PHI. No need for sinking.
997 if (isa<PHINode>(SinkCandidate))
998 return true;
999
1000 // Sink User tentatively and check its users
1001 WorkList.push_back(SinkCandidate);
1002 return true;
1003 };
1004
1005 WorkList.push_back(Phi);
1006 // Try to recursively sink instructions and their users after Previous.
1007 while (!WorkList.empty()) {
1008 Instruction *Current = WorkList.pop_back_val();
1009 for (User *User : Current->users()) {
1010 if (!TryToPushSinkCandidate(cast<Instruction>(User)))
1011 return false;
1012 }
1013 }
1014
1015 return true;
1016}
1017
1018/// This function returns the identity element (or neutral element) for
1019/// the operation K.
1020Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
1021 FastMathFlags FMF) const {
1022 switch (K) {
1023 case RecurKind::Xor:
1024 case RecurKind::Add:
1025 case RecurKind::Or:
1026 // Adding, Xoring, Oring zero to a number does not change it.
1027 return ConstantInt::get(Tp, 0);
1028 case RecurKind::Mul:
1029 // Multiplying a number by 1 does not change it.
1030 return ConstantInt::get(Tp, 1);
1031 case RecurKind::And:
1032 // AND-ing a number with an all-1 value does not change it.
1033 return ConstantInt::get(Tp, -1, true);
1034 case RecurKind::FMul:
1035 // Multiplying a number by 1 does not change it.
1036 return ConstantFP::get(Tp, 1.0L);
1037 case RecurKind::FMulAdd:
1038 case RecurKind::FAdd:
1039 // Adding zero to a number does not change it.
1040 // FIXME: Ideally we should not need to check FMF for FAdd and should always
1041 // use -0.0. However, this will currently result in mixed vectors of 0.0/-0.0.
1042 // Instead, we should ensure that 1) the FMF from FAdd are propagated to the PHI
1043 // nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0. This would
1044 // mean we can then remove the check for noSignedZeros() below (see D98963).
1045 if (FMF.noSignedZeros())
1046 return ConstantFP::get(Tp, 0.0L);
1047 return ConstantFP::get(Tp, -0.0L);
1048 case RecurKind::UMin:
1049 return ConstantInt::get(Tp, -1, true);
1050 case RecurKind::UMax:
1051 return ConstantInt::get(Tp, 0);
1052 case RecurKind::SMin:
1053 return ConstantInt::get(Tp,
1054 APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
1055 case RecurKind::SMax:
1056 return ConstantInt::get(Tp,
1057 APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
1058 case RecurKind::FMin:
1059 assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1060 "nnan, nsz is expected to be set for FP min reduction.");
1061 return ConstantFP::getInfinity(Tp, false /*Negative*/);
1062 case RecurKind::FMax:
1063 assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1064 "nnan, nsz is expected to be set for FP max reduction.");
1065 return ConstantFP::getInfinity(Tp, true /*Negative*/);
1066 case RecurKind::SelectICmp:
1067 case RecurKind::SelectFCmp:
1068 return getRecurrenceStartValue();
1069 break;
1070 default:
1071 llvm_unreachable("Unknown recurrence kind");
1072 }
1073}
1074
1075unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
1076 switch (Kind) {
1077 case RecurKind::Add:
1078 return Instruction::Add;
1079 case RecurKind::Mul:
1080 return Instruction::Mul;
1081 case RecurKind::Or:
1082 return Instruction::Or;
1083 case RecurKind::And:
1084 return Instruction::And;
1085 case RecurKind::Xor:
1086 return Instruction::Xor;
1087 case RecurKind::FMul:
1088 return Instruction::FMul;
1089 case RecurKind::FMulAdd:
1090 case RecurKind::FAdd:
1091 return Instruction::FAdd;
1092 case RecurKind::SMax:
1093 case RecurKind::SMin:
1094 case RecurKind::UMax:
1095 case RecurKind::UMin:
1096 case RecurKind::SelectICmp:
1097 return Instruction::ICmp;
1098 case RecurKind::FMax:
1099 case RecurKind::FMin:
1100 case RecurKind::SelectFCmp:
1101 return Instruction::FCmp;
1102 default:
1103 llvm_unreachable("Unknown recurrence operation");
1104 }
1105}
1106
1107SmallVector<Instruction *, 4>
1108RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
1109 SmallVector<Instruction *, 4> ReductionOperations;
1110 unsigned RedOp = getOpcode(Kind);
1111
1112 // Search down from the Phi to the LoopExitInstr, looking for instructions
1113 // with a single user of the correct type for the reduction.
1114
1115 // Note that we check that the type of the operand is correct for each item in
1116 // the chain, including the last (the loop exit value). This can come up from
1117 // sub, which would otherwise be treated as an add reduction. MinMax also need
1118 // to check for a pair of icmp/select, for which we use getNextInstruction and
1119 // isCorrectOpcode functions to step the right number of instruction, and
1120 // check the icmp/select pair.
1121 // FIXME: We also do not attempt to look through Select's yet, which might
1122 // be part of the reduction chain, or attempt to look through And's to find a
1123 // smaller bitwidth. Subs are also currently not allowed (which are usually
1124 // treated as part of a add reduction) as they are expected to generally be
1125 // more expensive than out-of-loop reductions, and need to be costed more
1126 // carefully.
1127 unsigned ExpectedUses = 1;
1128 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
      [0.1] 'RedOp' is equal to ICmp
1129 ExpectedUses = 2;
1130
1131 auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
1132 for (auto *User : Cur->users()) {
1133 Instruction *UI = cast<Instruction>(User);
1134 if (isa<PHINode>(UI))
1135 continue;
1136 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1137 // We are expecting an icmp/select pair, so we go to the next select
1138 // instruction if we can. We already know that Cur has 2 uses.
1139 if (isa<SelectInst>(UI))
1140 return UI;
1141 continue;
1142 }
1143 return UI;
1144 }
1145 return nullptr;
1146 };
1147 auto isCorrectOpcode = [&](Instruction *Cur) {
1148 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1149 Value *LHS, *RHS;
1150 return SelectPatternResult::isMinOrMax(
1151 matchSelectPattern(Cur, LHS, RHS).Flavor);
1152 }
1153 // Recognize a call to the llvm.fmuladd intrinsic.
1154 if (isFMulAddIntrinsic(Cur))
1155 return true;
1156
1157 return Cur->getOpcode() == RedOp;
1158 };
1159
1160 // Attempt to look through Phis which are part of the reduction chain
1161 unsigned ExtraPhiUses = 0;
1162 Instruction *RdxInstr = LoopExitInstr;
1163 if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
1: Assuming field 'LoopExitInstr' is a 'CastReturnType'
1.1: 'ExitPhi' is non-null
2: Taking true branch
1164 if (ExitPhi->getNumIncomingValues() != 2)
3: Assuming the condition is false
4: Taking false branch
1165 return {};
1166
1167 Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
5: Assuming the object is not a 'CastReturnType'
1168 Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
1169
1170 Instruction *Chain = nullptr;
1171 if (Inc0 == Phi)
6: Assuming 'Inc0' is equal to 'Phi'
7: Taking true branch
1172 Chain = Inc1;
1173 else if (Inc1 == Phi)
1174 Chain = Inc0;
1175 else
1176 return {};
1177
1178 RdxInstr = Chain;
1179 ExtraPhiUses = 1;
1180 }
1181
1182 // The loop exit instruction is checked first (as a quick test) but added last.
1183 // We check that its opcode is correct (and don't allow it to be a Sub) and that
1184 // it has the expected number of uses. It will have one use from the phi and
1185 // one from an LCSSA value, no matter the type.
1186 if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
8: Assuming the condition is false
9: Taking false branch
1187 return {};
1188
1189 // Check that the Phi has one (or two for min/max) uses, plus an extra use
1190 // for conditional reductions.
1191 if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
10: Called C++ object pointer is null
1192 return {};
1193
1194 Instruction *Cur = getNextInstruction(Phi);
1195
1196 // Each other instruction in the chain should have the expected number of uses
1197 // and be the correct opcode.
1198 while (Cur != RdxInstr) {
1199 if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
1200 return {};
1201
1202 ReductionOperations.push_back(Cur);
1203 Cur = getNextInstruction(Cur);
1204 }
1205
1206 ReductionOperations.push_back(Cur);
1207 return ReductionOperations;
1208}
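
The analyzer's path through this function (notes 1 through 10 above) reaches the null dereference at line 1191 only if Inc0 is null (note 5) while Inc0 == Phi (note 6), that is, only if Phi itself were null, which callers do not pass in practice. A hedged sketch of one way to make that invariant visible to the analyzer, factoring the exit-phi handling into a hypothetical helper that rejects non-instruction incoming values up front (illustrative only, not necessarily how LLVM addressed the report):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Hypothetical refactoring: decide whether ExitPhi merges the reduction phi
// with the rest of the chain, and return the chain instruction. Bailing out
// on a null Inc0/Inc1 means a null value can never compare equal to Phi,
// which removes the path the analyzer follows to line 1191.
static Instruction *getChainThroughExitPhi(PHINode *ExitPhi, PHINode *Phi) {
  if (ExitPhi->getNumIncomingValues() != 2)
    return nullptr;
  auto *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
  auto *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
  if (!Inc0 || !Inc1)
    return nullptr;
  if (Inc0 == Phi)
    return Inc1;
  if (Inc1 == Phi)
    return Inc0;
  return nullptr;
}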
1209
1210InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
1211 const SCEV *Step, BinaryOperator *BOp,
1212 Type *ElementType,
1213 SmallVectorImpl<Instruction *> *Casts)
1214 : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp),
1215 ElementType(ElementType) {
1216 assert(IK != IK_NoInduction && "Not an induction");
1217
1218 // Start value type should match the induction kind and the value
1219 // itself should not be null.
1220 assert(StartValue && "StartValue is null");
1221 assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
1222        "StartValue is not a pointer for pointer induction");
1223 assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
1224        "StartValue is not an integer for integer induction");
1225
1226 // Check the Step Value. It should be a non-zero integer value.
1227 assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
1228        "Step value is zero");
1229
1230 assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
1231        "StepValue is not an integer");
1232
1233 assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
1234        "StepValue is not FP for FpInduction");
1235 assert((IK != IK_FpInduction ||
1236         (InductionBinOp &&
1237          (InductionBinOp->getOpcode() == Instruction::FAdd ||
1238           InductionBinOp->getOpcode() == Instruction::FSub))) &&
1239        "Binary opcode should be specified for FP induction");
1240
1241 if (IK == IK_PtrInduction)
1242 assert(ElementType && "Pointer induction must have element type");
1243 else
1244 assert(!ElementType && "Non-pointer induction cannot have element type");
1245
1246 if (Casts) {
1247 for (auto &Inst : *Casts) {
1248 RedundantCasts.push_back(Inst);
1249 }
1250 }
1251}
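
To keep the assertions above easy to scan, here is a hedged restatement (no constraints beyond those asserted by the constructor):

// IK_IntInduction: integer StartValue, integer Step, ElementType == nullptr.
// IK_PtrInduction: pointer StartValue, integer Step, ElementType != nullptr.
// IK_FpInduction : FP Step, InductionBinOp is an FAdd or FSub, ElementType == nullptr.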
1252
1253ConstantInt *InductionDescriptor::getConstIntStepValue() const {
1254 if (isa<SCEVConstant>(Step))
1255 return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
1256 return nullptr;
1257}
1258
1259bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
1260 ScalarEvolution *SE,
1261 InductionDescriptor &D) {
1262
1263 // Here we only handle FP induction variables.
1264 assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");
1265
1266 if (TheLoop->getHeader() != Phi->getParent())
1267 return false;
1268
1269 // The loop may have multiple entrances or multiple exits; we can analyze
1270 // this phi if it has a unique entry value and a unique backedge value.
1271 if (Phi->getNumIncomingValues() != 2)
1272 return false;
1273 Value *BEValue = nullptr, *StartValue = nullptr;
1274 if (TheLoop->contains(Phi->getIncomingBlock(0))) {
1275 BEValue = Phi->getIncomingValue(0);
1276 StartValue = Phi->getIncomingValue(1);
1277 } else {
1278 assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
1279        "Unexpected Phi node in the loop");
1280 BEValue = Phi->getIncomingValue(1);
1281 StartValue = Phi->getIncomingValue(0);
1282 }
1283
1284 BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
1285 if (!BOp)
1286 return false;
1287
1288 Value *Addend = nullptr;
1289 if (BOp->getOpcode() == Instruction::FAdd) {
1290 if (BOp->getOperand(0) == Phi)
1291 Addend = BOp->getOperand(1);
1292 else if (BOp->getOperand(1) == Phi)
1293 Addend = BOp->getOperand(0);
1294 } else if (BOp->getOpcode() == Instruction::FSub)
1295 if (BOp->getOperand(0) == Phi)
1296 Addend = BOp->getOperand(1);
1297
1298 if (!Addend)
1299 return false;
1300
1301 // The addend should be loop invariant
1302 if (auto *I = dyn_cast<Instruction>(Addend))
1303 if (TheLoop->contains(I))
1304 return false;
1305
1306 // FP Step has unknown SCEV
1307 const SCEV *Step = SE->getUnknown(Addend);
1308 D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
1309 return true;
1310}
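
For intuition, a hypothetical source loop whose header phi matches this pattern: the backedge value is an FAdd of the phi and a loop-invariant addend, so the phi would be classified as IK_FpInduction with the addend as an unknown-SCEV step. The names and values below are illustrative only.

// Hypothetical example, not taken from any particular benchmark.
float weighted_sum(const float *A, int N) {
  float Acc = 0.0f;
  float W = 1.0f;            // becomes the FP induction phi in the loop header
  for (int I = 0; I < N; ++I) {
    Acc += A[I] * W;
    W += 0.5f;               // FAdd of the phi and a loop-invariant addend
  }
  return Acc;
}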
1311
1312/// This function is called when we suspect that the update-chain of a phi node
1313/// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
1314/// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
1315/// predicate P under which the SCEV expression for the phi can be the
1316/// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
1317/// cast instructions that are involved in the update-chain of this induction.
1318/// A caller that adds the required runtime predicate can be free to drop these
1319/// cast instructions, and compute the phi using \p AR (instead of some scev
1320/// expression with casts).
1321///
1322/// For example, without a predicate the scev expression can take the following
1323/// form:
1324/// (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
1325///
1326/// It corresponds to the following IR sequence:
1327/// %for.body:
1328/// %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
1329/// %casted_phi = "ExtTrunc i64 %x"
1330/// %add = add i64 %casted_phi, %step
1331///
1332/// where %x is given in \p PN,
1333/// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
1334/// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
1335/// several forms, for example, such as:
1336/// ExtTrunc1: %casted_phi = and %x, 2^n-1
1337/// or:
1338/// ExtTrunc2: %t = shl %x, m
1339/// %casted_phi = ashr %t, m
1340///
1341/// If we are able to find such sequence, we return the instructions
1342/// we found, namely %casted_phi and the instructions on its use-def chain up
1343/// to the phi (not including the phi).
1344static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
1345 const SCEVUnknown *PhiScev,
1346 const SCEVAddRecExpr *AR,
1347 SmallVectorImpl<Instruction *> &CastInsts) {
1348
1349 assert(CastInsts.empty() && "CastInsts is expected to be empty.");
1350 auto *PN = cast<PHINode>(PhiScev->getValue());
1351 assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
1352 const Loop *L = AR->getLoop();
1353
1354 // Find any cast instructions that participate in the def-use chain of
1355 // PhiScev in the loop.
1356 // FORNOW/TODO: We currently expect the def-use chain to include only
1357 // two-operand instructions, where one of the operands is an invariant.
1358 // createAddRecFromPHIWithCasts() currently does not support anything more
1359 // involved than that, so we keep the search simple. This can be
1360 // extended/generalized as needed.
1361
1362 auto getDef = [&](const Value *Val) -> Value * {
1363 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
1364 if (!BinOp)
1365 return nullptr;
1366 Value *Op0 = BinOp->getOperand(0);
1367 Value *Op1 = BinOp->getOperand(1);
1368 Value *Def = nullptr;
1369 if (L->isLoopInvariant(Op0))
1370 Def = Op1;
1371 else if (L->isLoopInvariant(Op1))
1372 Def = Op0;
1373 return Def;
1374 };
1375
1376 // Look for the instruction that defines the induction via the
1377 // loop backedge.
1378 BasicBlock *Latch = L->getLoopLatch();
1379 if (!Latch)
1380 return false;
1381 Value *Val = PN->getIncomingValueForBlock(Latch);
1382 if (!Val)
1383 return false;
1384
1385 // Follow the def-use chain until the induction phi is reached.
1386 // If on the way we encounter a Value that has the same SCEV Expr as the
1387 // phi node, we can consider the instructions we visit from that point
1388 // as part of the cast-sequence that can be ignored.
1389 bool InCastSequence = false;
1390 auto *Inst = dyn_cast<Instruction>(Val);
1391 while (Val != PN) {
1392 // If we encountered a phi node other than PN, or if we left the loop,
1393 // we bail out.
1394 if (!Inst || !L->contains(Inst)) {
1395 return false;
1396 }
1397 auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
1398 if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
1399 InCastSequence = true;
1400 if (InCastSequence) {
1401 // Only the last instruction in the cast sequence is expected to have
1402 // uses outside the induction def-use chain.
1403 if (!CastInsts.empty())
1404 if (!Inst->hasOneUse())
1405 return false;
1406 CastInsts.push_back(Inst);
1407 }
1408 Val = getDef(Val);
1409 if (!Val)
1410 return false;
1411 Inst = dyn_cast<Instruction>(Val);
1412 }
1413
1414 return InCastSequence;
1415}
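
As a plain-integer model of the ExtTrunc1 form from the comment above (a standalone sketch, not LLVM code): masking with 2^n - 1 is the and-based zext(trunc(x)) to n bits, and it leaves the value unchanged whenever the induction stays within n bits, which is exactly the runtime predicate under which the casts become redundant.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned N = 32;                        // target bitwidth of the cast
  const uint64_t Mask = (uint64_t(1) << N) - 1; // 2^n - 1
  uint64_t X = 0xDEADBEEFULL;                   // an IV value that fits in 32 bits
  assert((X & Mask) == X);                      // "ExtTrunc1" is a no-op here
  uint64_t Y = 0x1DEADBEEF0ULL;                 // a value that does not fit
  assert((Y & Mask) != Y);                      // ...is changed, hence the predicate
  return 0;
}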
1416
1417bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
1418 PredicatedScalarEvolution &PSE,
1419 InductionDescriptor &D, bool Assume) {
1420 Type *PhiTy = Phi->getType();
1421
1422 // Handle integer and pointer induction variables.
1423 // We now also handle FP inductions, but without trying to build a
1424 // recurrent expression from the PHI node in place.
1425
1426 if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
1427 !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
1428 return false;
1429
1430 if (PhiTy->isFloatingPointTy())
1431 return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);
1432
1433 const SCEV *PhiScev = PSE.getSCEV(Phi);
1434 const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1435
1436 // We need this expression to be an AddRecExpr.
1437 if (Assume && !AR)
1438 AR = PSE.getAsAddRec(Phi);
1439
1440 if (!AR) {
1441 LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1442 return false;
1443 }
1444
1445 // Record any Cast instructions that participate in the induction update
1446 const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
1447 // If we started from an UnknownSCEV, and managed to build an addRecurrence
1448 // only after enabling Assume with PSCEV, this means we may have encountered
1449 // cast instructions that required adding a runtime check in order to
1450 // guarantee the correctness of the AddRecurrence representation of the
1451 // induction.
1452 if (PhiScev != AR && SymbolicPhi) {
1453 SmallVector<Instruction *, 2> Casts;
1454 if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
1455 return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
1456 }
1457
1458 return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
1459}
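
A minimal caller-side sketch (the function name is hypothetical; it assumes the usual analysis headers) of how a loop transform might drive this entry point over the header phis, letting PSE add runtime predicates via Assume:

#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical driver: classify every header phi of L as an induction if
// possible, allowing PSE to version the loop with runtime predicates.
static unsigned countInductionPhis(Loop *L, PredicatedScalarEvolution &PSE) {
  unsigned Count = 0;
  for (PHINode &Phi : L->getHeader()->phis()) {
    InductionDescriptor ID;
    if (InductionDescriptor::isInductionPHI(&Phi, L, PSE, ID, /*Assume=*/true))
      ++Count; // ID now holds the kind, start value and step of this IV.
  }
  return Count;
}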
1460
1461bool InductionDescriptor::isInductionPHI(
1462 PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
1463 InductionDescriptor &D, const SCEV *Expr,
1464 SmallVectorImpl<Instruction *> *CastsToIgnore) {
1465 Type *PhiTy = Phi->getType();
1466 // We only handle integer and pointer induction variables.
1467 if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
1468 return false;
1469
1470 // Check that the PHI is consecutive.
1471 const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
1472 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1473
1474 if (!AR) {
1475 LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1476 return false;
1477 }
1478
1479 if (AR->getLoop() != TheLoop) {
1480 // FIXME: We should treat this as a uniform. Unfortunately, we
1481 // don't currently know how to handle uniform PHIs.
1482 LLVM_DEBUG(
1483     dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
1484 return false;
1485 }
1486
1487 // This function assumes that isInductionPHI is called only on Phi nodes
1488 // present inside loop headers. Check the same, and assert if the current
1489 // Phi is not present inside the loop header.
1490 assert(Phi->getParent() == AR->getLoop()->getHeader() &&
1491        "Invalid Phi node, not present in loop header");
1492
1493 Value *StartValue =
1494 Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
1495
1496 BasicBlock *Latch = AR->getLoop()->getLoopLatch();
1497 if (!Latch)
1498 return false;
1499
1500 const SCEV *Step = AR->getStepRecurrence(*SE);
1501 // Calculate the pointer stride and check if it is consecutive.
1502 // The stride may be a constant or a loop invariant integer value.
1503 const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
1504 if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
1505 return false;
1506
1507 if (PhiTy->isIntegerTy()) {
1508 BinaryOperator *BOp =
1509 dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
1510 D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
1511 /* ElementType */ nullptr, CastsToIgnore);
1512 return true;
1513 }
1514
1515 assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
1516 PointerType *PtrTy = cast<PointerType>(PhiTy);
1517
1518 // Always use i8 element type for opaque pointer inductions.
1519 // This allows induction variables w/non-constant steps.
1520 if (PtrTy->isOpaque()) {
1521 D = InductionDescriptor(StartValue, IK_PtrInduction, Step,
1522 /* BinOp */ nullptr,
1523 Type::getInt8Ty(PtrTy->getContext()));
1524 return true;
1525 }
1526
1527 // Pointer induction should be a constant.
1528 // TODO: This could be generalized, but should probably just
1529 // be dropped instead once the migration to opaque ptrs is
1530 // complete.
1531 if (!ConstStep)
1532 return false;
1533
1534 Type *ElementType = PtrTy->getNonOpaquePointerElementType();
1535 if (!ElementType->isSized())
1536 return false;
1537
1538 ConstantInt *CV = ConstStep->getValue();
1539 const DataLayout &DL = Phi->getModule()->getDataLayout();
1540 TypeSize TySize = DL.getTypeAllocSize(ElementType);
1541 // TODO: We could potentially support this for scalable vectors if we can
1542 // prove at compile time that the constant step is always a multiple of
1543 // the scalable type.
1544 if (TySize.isZero() || TySize.isScalable())
1545 return false;
1546
1547 int64_t Size = static_cast<int64_t>(TySize.getFixedValue());
1548 int64_t CVSize = CV->getSExtValue();
1549 if (CVSize % Size)
1550 return false;
1551 auto *StepValue =
1552 SE->getConstant(CV->getType(), CVSize / Size, true /* signed */);
1553 D = InductionDescriptor(StartValue, IK_PtrInduction, StepValue,
1554 /* BinOp */ nullptr, ElementType);
1555 return true;
1556}
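
A standalone arithmetic check (plain C++, illustrative values) of the typed-pointer stride logic at lines 1548-1552: the constant byte step must divide evenly by the element size, and the element-count stride is the quotient.

#include <cassert>
#include <cstdint>

int main() {
  const int64_t Size = 4;      // DL.getTypeAllocSize(i32) in bytes
  const int64_t CVSize = 8;    // constant byte step recovered from the SCEV
  assert(CVSize % Size == 0);  // must be a whole number of elements...
  assert(CVSize / Size == 2);  // ...here a consecutive stride of 2
  assert(6 % Size != 0);       // a 6-byte step over i32 would be rejected
  return 0;
}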