Bug Summary

File: llvm/include/llvm/ADT/APInt.h
Warning: line 927, column 15
Assigned value is garbage or undefined

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name BasicAliasAnalysis.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include 
-internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis/BasicAliasAnalysis.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis/BasicAliasAnalysis.cpp

1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/BasicAliasAnalysis.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/CaptureTracking.h"
25#include "llvm/Analysis/InstructionSimplify.h"
26#include "llvm/Analysis/MemoryBuiltins.h"
27#include "llvm/Analysis/MemoryLocation.h"
28#include "llvm/Analysis/PhiValues.h"
29#include "llvm/Analysis/TargetLibraryInfo.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/IR/Argument.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Dominators.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GetElementPtrTypeIterator.h"
40#include "llvm/IR/GlobalAlias.h"
41#include "llvm/IR/GlobalVariable.h"
42#include "llvm/IR/InstrTypes.h"
43#include "llvm/IR/Instruction.h"
44#include "llvm/IR/Instructions.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/Metadata.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/IR/Type.h"
50#include "llvm/IR/User.h"
51#include "llvm/IR/Value.h"
52#include "llvm/InitializePasses.h"
53#include "llvm/Pass.h"
54#include "llvm/Support/Casting.h"
55#include "llvm/Support/CommandLine.h"
56#include "llvm/Support/Compiler.h"
57#include "llvm/Support/KnownBits.h"
58#include <cassert>
59#include <cstdint>
60#include <cstdlib>
61#include <utility>
62
63#define DEBUG_TYPE"basicaa" "basicaa"
64
65using namespace llvm;
66
67/// Enable analysis of recursive PHI nodes.
68static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69 cl::init(true));
70
71/// By default, even on 32-bit architectures we use 64-bit integers for
72/// calculations. This will allow us to more-aggressively decompose indexing
73/// expressions calculated using i64 values (e.g., long long in C) which is
74/// common enough to worry about.
75static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76 cl::Hidden, cl::init(true));
77static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78 cl::Hidden, cl::init(false));
79
80/// SearchLimitReached / SearchTimes shows how often the limit of
81/// to decompose GEPs is reached. It will affect the precision
82/// of basic alias analysis.
83STATISTIC(SearchLimitReached, "Number of times the limit to "static llvm::Statistic SearchLimitReached = {"basicaa", "SearchLimitReached"
, "Number of times the limit to " "decompose GEPs is reached"
}
84 "decompose GEPs is reached")static llvm::Statistic SearchLimitReached = {"basicaa", "SearchLimitReached"
, "Number of times the limit to " "decompose GEPs is reached"
}
;
85STATISTIC(SearchTimes, "Number of times a GEP is decomposed")static llvm::Statistic SearchTimes = {"basicaa", "SearchTimes"
, "Number of times a GEP is decomposed"}
;
86
87/// Cutoff after which to stop analysing a set of phi nodes potentially involved
88/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89/// careful with value equivalence. We use reachability to make sure a value
90/// cannot be involved in a cycle.
91const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
92
93// The max limit of the search depth in DecomposeGEPExpression() and
94// getUnderlyingObject(), both functions need to use the same search
95// depth otherwise the algorithm in aliasGEP will assert.
96static const unsigned MaxLookupSearchDepth = 6;
97
98bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
99 FunctionAnalysisManager::Invalidator &Inv) {
100 // We don't care if this analysis itself is preserved, it has no state. But
101 // we need to check that the analyses it depends on have been. Note that we
102 // may be created without handles to some analyses and in that case don't
103 // depend on them.
104 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105 (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106 (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
107 return true;
108
109 // Otherwise this analysis result remains valid.
110 return false;
111}
112
113//===----------------------------------------------------------------------===//
114// Useful predicates
115//===----------------------------------------------------------------------===//
116
117/// Returns true if the pointer is one which would have been considered an
118/// escape by isNonEscapingLocalObject.
119static bool isEscapeSource(const Value *V) {
120 if (isa<CallBase>(V))
121 return true;
122
123 if (isa<Argument>(V))
124 return true;
125
126 // The load case works because isNonEscapingLocalObject considers all
127 // stores to be escapes (it passes true for the StoreCaptures argument
128 // to PointerMayBeCaptured).
129 if (isa<LoadInst>(V))
130 return true;
131
132 // The inttoptr case works because isNonEscapingLocalObject considers all
133 // means of converting or equating a pointer to an int (ptrtoint, ptr store
134 // which could be followed by an integer load, ptr<->int compare) as
135 // escaping, and objects located at well-known addresses via platform-specific
136 // means cannot be considered non-escaping local objects.
137 if (isa<IntToPtrInst>(V))
138 return true;
139
140 return false;
141}
142
143/// Returns the size of the object specified by V or UnknownSize if unknown.
144static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
145 const TargetLibraryInfo &TLI,
146 bool NullIsValidLoc,
147 bool RoundToAlign = false) {
148 uint64_t Size;
149 ObjectSizeOpts Opts;
150 Opts.RoundToAlign = RoundToAlign;
151 Opts.NullIsUnknownSize = NullIsValidLoc;
152 if (getObjectSize(V, Size, DL, &TLI, Opts))
153 return Size;
154 return MemoryLocation::UnknownSize;
155}
156
157/// Returns true if we can prove that the object specified by V is smaller than
158/// Size.
159static bool isObjectSmallerThan(const Value *V, uint64_t Size,
160 const DataLayout &DL,
161 const TargetLibraryInfo &TLI,
162 bool NullIsValidLoc) {
163 // Note that the meanings of the "object" are slightly different in the
164 // following contexts:
165 // c1: llvm::getObjectSize()
166 // c2: llvm.objectsize() intrinsic
167 // c3: isObjectSmallerThan()
168 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
169 // refers to the "entire object".
170 //
171 // Consider this example:
172 // char *p = (char*)malloc(100)
173 // char *q = p+80;
174 //
175 // In the context of c1 and c2, the "object" pointed by q refers to the
176 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
177 //
178 // However, in the context of c3, the "object" refers to the chunk of memory
179 // being allocated. So, the "object" has 100 bytes, and q points to the middle
180 // the "object". In case q is passed to isObjectSmallerThan() as the 1st
181 // parameter, before the llvm::getObjectSize() is called to get the size of
182 // entire object, we should:
183 // - either rewind the pointer q to the base-address of the object in
184 // question (in this case rewind to p), or
185 // - just give up. It is up to caller to make sure the pointer is pointing
186 // to the base address the object.
187 //
188 // We go for 2nd option for simplicity.
189 if (!isIdentifiedObject(V))
190 return false;
191
192 // This function needs to use the aligned object size because we allow
193 // reads a bit past the end given sufficient alignment.
194 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
195 /*RoundToAlign*/ true);
196
197 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
198}
199
200/// Return the minimal extent from \p V to the end of the underlying object,
201/// assuming the result is used in an aliasing query. E.g., we do use the query
202/// location size and the fact that null pointers cannot alias here.
203static uint64_t getMinimalExtentFrom(const Value &V,
204 const LocationSize &LocSize,
205 const DataLayout &DL,
206 bool NullIsValidLoc) {
207 // If we have dereferenceability information we know a lower bound for the
208 // extent as accesses for a lower offset would be valid. We need to exclude
209 // the "or null" part if null is a valid pointer.
210 bool CanBeNull, CanBeFreed;
211 uint64_t DerefBytes =
212 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
213 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
214 DerefBytes = CanBeFreed ? 0 : DerefBytes;
215 // If queried with a precise location size, we assume that location size to be
216 // accessed, thus valid.
217 if (LocSize.isPrecise())
218 DerefBytes = std::max(DerefBytes, LocSize.getValue());
219 return DerefBytes;
220}
221
222/// Returns true if we can prove that the object specified by V has size Size.
223static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
224 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
225 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
226 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
227}
228
229//===----------------------------------------------------------------------===//
230// GetElementPtr Instruction Decomposition and Analysis
231//===----------------------------------------------------------------------===//
232
233namespace {
234/// Represents zext(sext(V)).
235struct ExtendedValue {
236 const Value *V;
237 unsigned ZExtBits;
238 unsigned SExtBits;
239
240 explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
241 unsigned SExtBits = 0)
242 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
243
244 unsigned getBitWidth() const {
245 return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
246 }
247
248 ExtendedValue withValue(const Value *NewV) const {
249 return ExtendedValue(NewV, ZExtBits, SExtBits);
250 }
251
252 ExtendedValue withZExtOfValue(const Value *NewV) const {
253 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
254 NewV->getType()->getPrimitiveSizeInBits();
255 // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
256 return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
257 }
258
259 ExtendedValue withSExtOfValue(const Value *NewV) const {
260 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
261 NewV->getType()->getPrimitiveSizeInBits();
262 // zext(sext(sext(NewV)))
263 return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
264 }
265
266 APInt evaluateWith(APInt N) const {
267 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&(static_cast<void> (0))
268 "Incompatible bit width")(static_cast<void> (0));
269 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
270 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
271 return N;
272 }
273
274 bool canDistributeOver(bool NUW, bool NSW) const {
275 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
276 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
277 return (!ZExtBits
42.1
Field 'ZExtBits' is 0
42.1
Field 'ZExtBits' is 0
|| NUW) && (!SExtBits
42.2
Field 'SExtBits' is 0
42.2
Field 'SExtBits' is 0
|| NSW)
;
43
Returning the value 1, which participates in a condition later
278 }
279};
280
281/// Represents zext(sext(V)) * Scale + Offset.
282struct LinearExpression {
283 ExtendedValue Val;
284 APInt Scale;
285 APInt Offset;
286
287 /// True if all operations in this expression are NSW.
288 bool IsNSW;
289
290 LinearExpression(const ExtendedValue &Val, const APInt &Scale,
291 const APInt &Offset, bool IsNSW)
292 : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
293
294 LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
295 unsigned BitWidth = Val.getBitWidth();
296 Scale = APInt(BitWidth, 1);
297 Offset = APInt(BitWidth, 0);
298 }
299};
300}
301
302/// Analyzes the specified value as a linear expression: "A*V + B", where A and
303/// B are constant integers.
304static LinearExpression GetLinearExpression(
305 const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
306 AssumptionCache *AC, DominatorTree *DT) {
307 // Limit our recursion depth.
308 if (Depth
21.1
'Depth' is not equal to 6
32.1
'Depth' is not equal to 6
21.1
'Depth' is not equal to 6
32.1
'Depth' is not equal to 6
== 6)
22
Taking false branch
33
Taking false branch
309 return Val;
310
311 if (const ConstantInt *Const
23.1
'Const' is null
34.1
'Const' is null
23.1
'Const' is null
34.1
'Const' is null
= dyn_cast<ConstantInt>(Val.V))
23
Field 'V' is not a 'ConstantInt'
24
Taking false branch
34
Assuming field 'V' is not a 'ConstantInt'
35
Taking false branch
312 return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
313 Val.evaluateWith(Const->getValue()), true);
314
315 if (const BinaryOperator *BOp
25.1
'BOp' is null
36.1
'BOp' is non-null
25.1
'BOp' is null
36.1
'BOp' is non-null
= dyn_cast<BinaryOperator>(Val.V)) {
25
Assuming field 'V' is not a 'BinaryOperator'
26
Taking false branch
36
Assuming field 'V' is a 'BinaryOperator'
37
Taking true branch
316 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
38
Assuming 'RHSC' is non-null
39
Taking true branch
317 APInt RHS = Val.evaluateWith(RHSC->getValue());
318 // The only non-OBO case we deal with is or, and only limited to the
319 // case where it is both nuw and nsw.
320 bool NUW = true, NSW = true;
321 if (isa<OverflowingBinaryOperator>(BOp)) {
40
Assuming 'BOp' is not a 'OverflowingBinaryOperator'
41
Taking false branch
322 NUW &= BOp->hasNoUnsignedWrap();
323 NSW &= BOp->hasNoSignedWrap();
324 }
325 if (!Val.canDistributeOver(NUW, NSW))
42
Calling 'ExtendedValue::canDistributeOver'
44
Returning from 'ExtendedValue::canDistributeOver'
45
Taking false branch
326 return Val;
327
328 LinearExpression E(Val);
329 switch (BOp->getOpcode()) {
46
Control jumps to 'case Shl:' at line 364
330 default:
331 // We don't understand this instruction, so we can't decompose it any
332 // further.
333 return Val;
334 case Instruction::Or:
335 // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
336 // analyze it.
337 if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
338 BOp, DT))
339 return Val;
340
341 LLVM_FALLTHROUGH[[gnu::fallthrough]];
342 case Instruction::Add: {
343 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
344 Depth + 1, AC, DT);
345 E.Offset += RHS;
346 E.IsNSW &= NSW;
347 break;
348 }
349 case Instruction::Sub: {
350 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
351 Depth + 1, AC, DT);
352 E.Offset -= RHS;
353 E.IsNSW &= NSW;
354 break;
355 }
356 case Instruction::Mul: {
357 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
358 Depth + 1, AC, DT);
359 E.Offset *= RHS;
360 E.Scale *= RHS;
361 E.IsNSW &= NSW;
362 break;
363 }
364 case Instruction::Shl:
365 // We're trying to linearize an expression of the kind:
366 // shl i8 -128, 36
367 // where the shift count exceeds the bitwidth of the type.
368 // We can't decompose this further (the expression would return
369 // a poison value).
370 if (RHS.getLimitedValue() > Val.getBitWidth())
47
Assuming the condition is false
48
Taking false branch
371 return Val;
372
373 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
374 Depth + 1, AC, DT);
375 E.Offset <<= RHS.getLimitedValue();
49
Passing the value 18446744073709551615 via 1st parameter 'Limit'
50
Calling 'APInt::getLimitedValue'
54
Returning from 'APInt::getLimitedValue'
55
Passing the value 4294967295 via 1st parameter 'ShiftAmt'
56
Calling 'APInt::operator<<='
376 E.Scale <<= RHS.getLimitedValue();
377 E.IsNSW &= NSW;
378 break;
379 }
380 return E;
381 }
382 }
383
384 if (isa<ZExtInst>(Val.V))
27
Assuming field 'V' is not a 'ZExtInst'
28
Taking false branch
385 return GetLinearExpression(
386 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
387 DL, Depth + 1, AC, DT);
388
389 if (isa<SExtInst>(Val.V))
29
Assuming field 'V' is a 'SExtInst'
30
Taking true branch
390 return GetLinearExpression(
32
Calling 'GetLinearExpression'
391 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
31
Field 'V' is a 'CastInst'
392 DL, Depth + 1, AC, DT);
393
394 return Val;
395}
396
397/// To ensure a pointer offset fits in an integer of size PointerSize
398/// (in bits) when that size is smaller than the maximum pointer size. This is
399/// an issue, for example, in particular for 32b pointers with negative indices
400/// that rely on two's complement wrap-arounds for precise alias information
401/// where the maximum pointer size is 64b.
402static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
403 assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!")(static_cast<void> (0));
404 unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
405 return (Offset << ShiftBits).ashr(ShiftBits);
406}
407
408static unsigned getMaxPointerSize(const DataLayout &DL) {
409 unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
410 if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
411 if (DoubleCalcBits) MaxPointerSize *= 2;
412
413 return MaxPointerSize;
414}
415
416/// If V is a symbolic pointer expression, decompose it into a base pointer
417/// with a constant offset and a number of scaled symbolic offsets.
418///
419/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
420/// in the VarIndices vector) are Value*'s that are known to be scaled by the
421/// specified amount, but which may have other unrepresented high bits. As
422/// such, the gep cannot necessarily be reconstructed from its decomposed form.
423///
424/// This function is capable of analyzing everything that getUnderlyingObject
425/// can look through. To be able to do that getUnderlyingObject and
426/// DecomposeGEPExpression must use the same search depth
427/// (MaxLookupSearchDepth).
428BasicAAResult::DecomposedGEP
429BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
430 AssumptionCache *AC, DominatorTree *DT) {
431 // Limit recursion depth to limit compile time in crazy cases.
432 unsigned MaxLookup = MaxLookupSearchDepth;
433 SearchTimes++;
434 const Instruction *CxtI = dyn_cast<Instruction>(V);
2
Assuming 'V' is not a 'Instruction'
435
436 unsigned MaxPointerSize = getMaxPointerSize(DL);
437 DecomposedGEP Decomposed;
438 Decomposed.Offset = APInt(MaxPointerSize, 0);
439 Decomposed.HasCompileTimeConstantScale = true;
440 do {
441 // See if this is a bitcast or GEP.
442 const Operator *Op = dyn_cast<Operator>(V);
3
Assuming 'V' is a 'Operator'
443 if (!Op
3.1
'Op' is non-null
3.1
'Op' is non-null
) {
444 // The only non-operator case we can handle are GlobalAliases.
445 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
446 if (!GA->isInterposable()) {
447 V = GA->getAliasee();
448 continue;
449 }
450 }
451 Decomposed.Base = V;
452 return Decomposed;
453 }
454
455 if (Op->getOpcode() == Instruction::BitCast ||
4
Assuming the condition is false
6
Taking false branch
456 Op->getOpcode() == Instruction::AddrSpaceCast) {
5
Assuming the condition is false
457 V = Op->getOperand(0);
458 continue;
459 }
460
461 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
7
Assuming 'Op' is a 'GEPOperator'
462 if (!GEPOp
7.1
'GEPOp' is non-null
7.1
'GEPOp' is non-null
) {
8
Taking false branch
463 if (const auto *PHI = dyn_cast<PHINode>(V)) {
464 // Look through single-arg phi nodes created by LCSSA.
465 if (PHI->getNumIncomingValues() == 1) {
466 V = PHI->getIncomingValue(0);
467 continue;
468 }
469 } else if (const auto *Call = dyn_cast<CallBase>(V)) {
470 // CaptureTracking can know about special capturing properties of some
471 // intrinsics like launder.invariant.group, that can't be expressed with
472 // the attributes, but have properties like returning aliasing pointer.
473 // Because some analysis may assume that nocaptured pointer is not
474 // returned from some special intrinsic (because function would have to
475 // be marked with returns attribute), it is crucial to use this function
476 // because it should be in sync with CaptureTracking. Not using it may
477 // cause weird miscompilations where 2 aliasing pointers are assumed to
478 // noalias.
479 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
480 V = RP;
481 continue;
482 }
483 }
484
485 Decomposed.Base = V;
486 return Decomposed;
487 }
488
489 // Track whether we've seen at least one in bounds gep, and if so, whether
490 // all geps parsed were in bounds.
491 if (Decomposed.InBounds == None)
9
Taking true branch
492 Decomposed.InBounds = GEPOp->isInBounds();
493 else if (!GEPOp->isInBounds())
494 Decomposed.InBounds = false;
495
496 // Don't attempt to analyze GEPs over unsized objects.
497 if (!GEPOp->getSourceElementType()->isSized()) {
10
Taking false branch
498 Decomposed.Base = V;
499 return Decomposed;
500 }
501
502 // Don't attempt to analyze GEPs if index scale is not a compile-time
503 // constant.
504 if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
11
Assuming the object is not a 'ScalableVectorType'
12
Taking false branch
505 Decomposed.Base = V;
506 Decomposed.HasCompileTimeConstantScale = false;
507 return Decomposed;
508 }
509
510 unsigned AS = GEPOp->getPointerAddressSpace();
511 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
512 gep_type_iterator GTI = gep_type_begin(GEPOp);
513 unsigned PointerSize = DL.getPointerSizeInBits(AS);
514 // Assume all GEP operands are constants until proven otherwise.
515 bool GepHasConstantOffset = true;
516 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
14
Loop condition is true. Entering loop body
517 I != E; ++I, ++GTI) {
13
Assuming 'I' is not equal to 'E'
518 const Value *Index = *I;
519 // Compute the (potentially symbolic) offset in bytes for this index.
520 if (StructType *STy = GTI.getStructTypeOrNull()) {
15
Assuming 'STy' is null
16
Taking false branch
521 // For a struct, add the member offset.
522 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
523 if (FieldNo == 0)
524 continue;
525
526 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
527 continue;
528 }
529
530 // For an array/pointer, add the element offset, explicitly scaled.
531 if (const ConstantInt *CIdx
17.1
'CIdx' is null
17.1
'CIdx' is null
= dyn_cast<ConstantInt>(Index)) {
17
Assuming 'Index' is not a 'ConstantInt'
18
Taking false branch
532 if (CIdx->isZero())
533 continue;
534 Decomposed.Offset +=
535 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
536 CIdx->getValue().sextOrTrunc(MaxPointerSize);
537 continue;
538 }
539
540 GepHasConstantOffset = false;
541
542 APInt Scale(MaxPointerSize,
543 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
544 // If the integer type is smaller than the pointer size, it is implicitly
545 // sign extended to pointer size.
546 unsigned Width = Index->getType()->getIntegerBitWidth();
547 unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
19
Assuming 'PointerSize' is <= 'Width'
20
'?' condition is false
548 LinearExpression LE = GetLinearExpression(
21
Calling 'GetLinearExpression'
549 ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
550
551 // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
552 // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
553
554 // It can be the case that, even through C1*V+C2 does not overflow for
555 // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
556 // decompose the expression in this way.
557 //
558 // FIXME: C1*Scale and the other operations in the decomposed
559 // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
560 // possibility.
561 bool Overflow;
562 APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
563 .smul_ov(Scale, Overflow);
564 if (Overflow) {
565 LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
566 } else {
567 Decomposed.Offset += ScaledOffset;
568 Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
569 }
570
571 // If we already had an occurrence of this index variable, merge this
572 // scale into it. For example, we want to handle:
573 // A[x][x] -> x*16 + x*4 -> x*20
574 // This also ensures that 'x' only appears in the index list once.
575 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
576 if (Decomposed.VarIndices[i].V == LE.Val.V &&
577 Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
578 Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
579 Scale += Decomposed.VarIndices[i].Scale;
580 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
581 break;
582 }
583 }
584
585 // Make sure that we have a scale that makes sense for this target's
586 // pointer size.
587 Scale = adjustToPointerSize(Scale, PointerSize);
588
589 if (!!Scale) {
590 VariableGEPIndex Entry = {
591 LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits, Scale, CxtI, LE.IsNSW};
592 Decomposed.VarIndices.push_back(Entry);
593 }
594 }
595
596 // Take care of wrap-arounds
597 if (GepHasConstantOffset)
598 Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
599
600 // Analyze the base pointer next.
601 V = GEPOp->getOperand(0);
602 } while (--MaxLookup);
603
604 // If the chain of expressions is too deep, just return early.
605 Decomposed.Base = V;
606 SearchLimitReached++;
607 return Decomposed;
608}
609
610/// Returns whether the given pointer value points to memory that is local to
611/// the function, with global constants being considered local to all
612/// functions.
613bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
614 AAQueryInfo &AAQI, bool OrLocal) {
615 assert(Visited.empty() && "Visited must be cleared after use!")(static_cast<void> (0));
616
617 unsigned MaxLookup = 8;
618 SmallVector<const Value *, 16> Worklist;
619 Worklist.push_back(Loc.Ptr);
620 do {
621 const Value *V = getUnderlyingObject(Worklist.pop_back_val());
622 if (!Visited.insert(V).second) {
623 Visited.clear();
624 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
625 }
626
627 // An alloca instruction defines local memory.
628 if (OrLocal && isa<AllocaInst>(V))
629 continue;
630
631 // A global constant counts as local memory for our purposes.
632 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
633 // Note: this doesn't require GV to be "ODR" because it isn't legal for a
634 // global to be marked constant in some modules and non-constant in
635 // others. GV may even be a declaration, not a definition.
636 if (!GV->isConstant()) {
637 Visited.clear();
638 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
639 }
640 continue;
641 }
642
643 // If both select values point to local memory, then so does the select.
644 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
645 Worklist.push_back(SI->getTrueValue());
646 Worklist.push_back(SI->getFalseValue());
647 continue;
648 }
649
650 // If all values incoming to a phi node point to local memory, then so does
651 // the phi.
652 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
653 // Don't bother inspecting phi nodes with many operands.
654 if (PN->getNumIncomingValues() > MaxLookup) {
655 Visited.clear();
656 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
657 }
658 append_range(Worklist, PN->incoming_values());
659 continue;
660 }
661
662 // Otherwise be conservative.
663 Visited.clear();
664 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
665 } while (!Worklist.empty() && --MaxLookup);
666
667 Visited.clear();
668 return Worklist.empty();
669}
670
671static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
672 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
673 return II && II->getIntrinsicID() == IID;
674}
675
676/// Returns the behavior when calling the given call site.
677FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
678 if (Call->doesNotAccessMemory())
679 // Can't do better than this.
680 return FMRB_DoesNotAccessMemory;
681
682 FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
683
684 // If the callsite knows it only reads memory, don't return worse
685 // than that.
686 if (Call->onlyReadsMemory())
687 Min = FMRB_OnlyReadsMemory;
688 else if (Call->doesNotReadMemory())
689 Min = FMRB_OnlyWritesMemory;
690
691 if (Call->onlyAccessesArgMemory())
692 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
693 else if (Call->onlyAccessesInaccessibleMemory())
694 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
695 else if (Call->onlyAccessesInaccessibleMemOrArgMem())
696 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
697
698 // If the call has operand bundles then aliasing attributes from the function
699 // it calls do not directly apply to the call. This can be made more precise
700 // in the future.
701 if (!Call->hasOperandBundles())
702 if (const Function *F = Call->getCalledFunction())
703 Min =
704 FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
705
706 return Min;
707}
708
709/// Returns the behavior when calling the given function. For use when the call
710/// site is not known.
711FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
712 // If the function declares it doesn't access memory, we can't do better.
713 if (F->doesNotAccessMemory())
714 return FMRB_DoesNotAccessMemory;
715
716 FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
717
718 // If the function declares it only reads memory, go with that.
719 if (F->onlyReadsMemory())
720 Min = FMRB_OnlyReadsMemory;
721 else if (F->doesNotReadMemory())
722 Min = FMRB_OnlyWritesMemory;
723
724 if (F->onlyAccessesArgMemory())
725 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
726 else if (F->onlyAccessesInaccessibleMemory())
727 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
728 else if (F->onlyAccessesInaccessibleMemOrArgMem())
729 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
730
731 return Min;
732}
733
734/// Returns true if this is a writeonly (i.e Mod only) parameter.
735static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
736 const TargetLibraryInfo &TLI) {
737 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
738 return true;
739
740 // We can bound the aliasing properties of memset_pattern16 just as we can
741 // for memcpy/memset. This is particularly important because the
742 // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
743 // whenever possible.
744 // FIXME Consider handling this in InferFunctionAttr.cpp together with other
745 // attributes.
746 LibFunc F;
747 if (Call->getCalledFunction() &&
748 TLI.getLibFunc(*Call->getCalledFunction(), F) &&
749 F == LibFunc_memset_pattern16 && TLI.has(F))
750 if (ArgIdx == 0)
751 return true;
752
753 // TODO: memset_pattern4, memset_pattern8
754 // TODO: _chk variants
755 // TODO: strcmp, strcpy
756
757 return false;
758}
759
760ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
761 unsigned ArgIdx) {
762 // Checking for known builtin intrinsics and target library functions.
763 if (isWriteOnlyParam(Call, ArgIdx, TLI))
764 return ModRefInfo::Mod;
765
766 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
767 return ModRefInfo::Ref;
768
769 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
770 return ModRefInfo::NoModRef;
771
772 return AAResultBase::getArgModRefInfo(Call, ArgIdx);
773}
774
775#ifndef NDEBUG1
776static const Function *getParent(const Value *V) {
777 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
778 if (!inst->getParent())
779 return nullptr;
780 return inst->getParent()->getParent();
781 }
782
783 if (const Argument *arg = dyn_cast<Argument>(V))
784 return arg->getParent();
785
786 return nullptr;
787}
788
789static bool notDifferentParent(const Value *O1, const Value *O2) {
790
791 const Function *F1 = getParent(O1);
792 const Function *F2 = getParent(O2);
793
794 return !F1 || !F2 || F1 == F2;
795}
796#endif
797
/// Public entry point: answer an alias query between two memory locations by
/// delegating to the recursive pointer/size check.
AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  // BasicAA reasons strictly intraprocedurally, so both pointers must come
  // from the same function (or have no parent function at all).
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&(static_cast<void> (0))
         "BasicAliasAnalysis doesn't support interprocedural queries.")(static_cast<void> (0));
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}
805
/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&(static_cast<void> (0))
         "AliasAnalysis query involving multiple functions!")(static_cast<void> (0));

  // Strip GEPs/casts to find the allocation (or other root) the location
  // points into.
  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    // Tracks whether *every* operand found to alias Object did so as
    // MustAlias; used to strengthen the result below.
    bool IsMustAlias = true;

    // Walk the call's data operands looking for ones that might carry Object
    // into the callee.
    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case if there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mods any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, lets use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}
979
980ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
981 const CallBase *Call2,
982 AAQueryInfo &AAQI) {
983 // Guard intrinsics are marked as arbitrarily writing so that proper control
984 // dependencies are maintained but they never mods any particular memory
985 // location.
986 //
987 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
988 // heap state at the point the guard is issued needs to be consistent in case
989 // the guard invokes the "deopt" continuation.
990
991 // NB! This function is *not* commutative, so we special case two
992 // possibilities for guard intrinsics.
993
994 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
995 return isModSet(createModRefInfo(getModRefBehavior(Call2)))
996 ? ModRefInfo::Ref
997 : ModRefInfo::NoModRef;
998
999 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1000 return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1001 ? ModRefInfo::Mod
1002 : ModRefInfo::NoModRef;
1003
1004 // The AAResultBase base class has some smarts, lets use them.
1005 return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1006}
1007
1008/// Return true if we know V to the base address of the corresponding memory
1009/// object. This implies that any address less than V must be out of bounds
1010/// for the underlying object. Note that just being isIdentifiedObject() is
1011/// not enough - For example, a negative offset from a noalias argument or call
1012/// can be inbounds w.r.t the actual underlying object.
1013static bool isBaseOfObject(const Value *V) {
1014 // TODO: We can handle other cases here
1015 // 1) For GC languages, arguments to functions are often required to be
1016 // base pointers.
1017 // 2) Result of allocation routines are often base pointers. Leverage TLI.
1018 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1019}
1020
/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  // Decompose both pointers into a base plus constant and variable offsets.
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if index scale is not a
  // compile-time constant.
  // NOTE(review): the static analyzer reports a potential use of an
  // uninitialized APInt along a path through DecomposeGEPExpression; this
  // early bail-out is presumably what keeps the garbage scales from being
  // consumed below — confirm in DecomposeGEPExpression.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return AliasResult::MayAlias;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&(static_cast<void> (0))
         "DecomposeGEPExpression returned a result different from "(static_cast<void> (0))
         "getUnderlyingObject")(static_cast<void> (0));

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  DecompGEP1.Offset -= DecompGEP2.Offset;
  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // NOTE(review): InBounds is dereferenced unconditionally — presumably
  // DecomposeGEPExpression always populates it for GEP inputs; confirm.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(
        MemoryLocation(UnderlyingV1, V1Size),
        MemoryLocation(UnderlyingV2, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(UnderlyingV1),
      MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||(static_cast<void> (0))
           BaseAlias == AliasResult::MayAlias)(static_cast<void> (0));
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && Off.ule(INT32_MAX(2147483647)) &&
            (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    // GCD of the scales of all variable indices; initialized on the first
    // loop iteration below (i == 0) before it is ever read.
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      APInt Scale = DecompGEP1.VarIndices[i].Scale;
      APInt ScaleForGCD = DecompGEP1.VarIndices[i].Scale;
      // Without NSW, only the scale's trailing-zero factor is reliable.
      if (!DecompGEP1.VarIndices[i].IsNSW)
        ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
                                          Scale.countTrailingZeros());

      if (i == 0)
        GCD = ScaleForGCD.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

      if (AllNonNegative || AllNonPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;
        const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //  2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following layout:
    // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size]
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following layout:
    // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine whether abs(VarIndex) > 0.
      Optional<APInt> MinAbsVarIndex;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
          MinAbsVarIndex = Var.Scale.abs();
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
            Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // Check that an access at OffsetLo or lower, and an access at OffsetHi
        // or higher both do not alias.
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return AliasResult::NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                DecompGEP1.Offset, &AC, DT))
      return AliasResult::NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}
1257
1258static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1259 // If the results agree, take it.
1260 if (A == B)
1261 return A;
1262 // A mix of PartialAlias and MustAlias is PartialAlias.
1263 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1264 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1265 return AliasResult::PartialAlias;
1266 // Otherwise, we don't know anything.
1267 return AliasResult::MayAlias;
1268}
1269
1270/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1271/// against another.
1272AliasResult
1273BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1274 const Value *V2, LocationSize V2Size,
1275 AAQueryInfo &AAQI) {
1276 // If the values are Selects with the same condition, we can do a more precise
1277 // check: just check for aliases between the values on corresponding arms.
1278 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1279 if (SI->getCondition() == SI2->getCondition()) {
1280 AliasResult Alias = getBestAAResults().alias(
1281 MemoryLocation(SI->getTrueValue(), SISize),
1282 MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1283 if (Alias == AliasResult::MayAlias)
1284 return AliasResult::MayAlias;
1285 AliasResult ThisAlias = getBestAAResults().alias(
1286 MemoryLocation(SI->getFalseValue(), SISize),
1287 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1288 return MergeAliasResults(ThisAlias, Alias);
1289 }
1290
1291 // If both arms of the Select node NoAlias or MustAlias V2, then returns
1292 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1293 AliasResult Alias = getBestAAResults().alias(
1294 MemoryLocation(V2, V2Size),
1295 MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1296 if (Alias == AliasResult::MayAlias)
1297 return AliasResult::MayAlias;
1298
1299 AliasResult ThisAlias = getBestAAResults().alias(
1300 MemoryLocation(V2, V2Size),
1301 MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1302 return MergeAliasResults(ThisAlias, Alias);
1303}
1304
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // A phi with no incoming values can only occur in unreachable code.
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        // Fold this edge's verdict into the running result; MayAlias can
        // never improve, so stop early.
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      // The loop runs at least once (non-zero incoming values checked above),
      // so Alias is populated here.
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  // PV is the (optional) PhiValues analysis held by this result object.
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes. In which case, this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiInfo then just look at the operands of the phi itself
    // FIXME: Remove this once we can guarantee that we have PhiInfo always
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conserviate when we have more than one Phi input.  It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  // Undo the insertion on every exit path from this function.
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid to all elements and needs to conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = getBestAAResults().alias(
        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}
1449
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporary store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. The cache key uses the two
  // locations with the pointers in a canonical (sorted-by-address) order.
  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  // Speculatively insert a NoAlias entry; if the pair was already present,
  // this is a cache hit and Pair.second is false.
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption (a not-yet-definitive entry).
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  // Re-find the entry: the recursive query may have grown the cache and
  // invalidated the iterator obtained from try_emplace above.
  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  // NOTE(review): -1 presumably flags the entry as definitive for
  // Entry.isDefinitive() above — confirm against AAQueryInfo::CacheEntry.
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}
1614
1615AliasResult BasicAAResult::aliasCheckRecursive(
1616 const Value *V1, LocationSize V1Size,
1617 const Value *V2, LocationSize V2Size,
1618 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1619 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1620 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1621 if (Result != AliasResult::MayAlias)
1622 return Result;
1623 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1624 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1625 if (Result != AliasResult::MayAlias)
1626 return Result;
1627 }
1628
1629 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1630 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1631 if (Result != AliasResult::MayAlias)
1632 return Result;
1633 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1634 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1635 if (Result != AliasResult::MayAlias)
1636 return Result;
1637 }
1638
1639 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1640 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1641 if (Result != AliasResult::MayAlias)
1642 return Result;
1643 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1644 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1645 if (Result != AliasResult::MayAlias)
1646 return Result;
1647 }
1648
1649 // If both pointers are pointing into the same object and one of them
1650 // accesses the entire object, then the accesses must overlap in some way.
1651 if (O1 == O2) {
1652 bool NullIsValidLocation = NullPointerIsDefined(&F);
1653 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1654 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1655 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1656 return AliasResult::PartialAlias;
1657 }
1658
1659 return AliasResult::MayAlias;
1660}
1661
1662/// Check whether two Values can be considered equivalent.
1663///
1664/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1665/// they can not be part of a cycle in the value graph by looking at all
1666/// visited phi nodes an making sure that the phis cannot reach the value. We
1667/// have to do this because we are looking through phi nodes (That is we say
1668/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1669bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1670 const Value *V2) {
1671 if (V != V2)
1672 return false;
1673
1674 const Instruction *Inst = dyn_cast<Instruction>(V);
1675 if (!Inst)
1676 return true;
1677
1678 if (VisitedPhiBBs.empty())
1679 return true;
1680
1681 if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1682 return false;
1683
1684 // Make sure that the visited phis cannot reach the Value. This ensures that
1685 // the Values cannot come from different iterations of a potential cycle the
1686 // phi nodes could be involved in.
1687 for (auto *P : VisitedPhiBBs)
1688 if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1689 return false;
1690
1691 return true;
1692}
1693
1694/// Computes the symbolic difference between two de-composed GEPs.
1695///
1696/// Dest and Src are the variable indices from two decomposed GetElementPtr
1697/// instructions GEP1 and GEP2 which have common base pointers.
1698void BasicAAResult::GetIndexDifference(
1699 SmallVectorImpl<VariableGEPIndex> &Dest,
1700 const SmallVectorImpl<VariableGEPIndex> &Src) {
1701 if (Src.empty())
1702 return;
1703
1704 for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1705 const Value *V = Src[i].V;
1706 unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1707 APInt Scale = Src[i].Scale;
1708
1709 // Find V in Dest. This is N^2, but pointer indices almost never have more
1710 // than a few variable indexes.
1711 for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1712 if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1713 Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1714 continue;
1715
1716 // If we found it, subtract off Scale V's from the entry in Dest. If it
1717 // goes to zero, remove the entry.
1718 if (Dest[j].Scale != Scale) {
1719 Dest[j].Scale -= Scale;
1720 Dest[j].IsNSW = false;
1721 } else
1722 Dest.erase(Dest.begin() + j);
1723 Scale = 0;
1724 break;
1725 }
1726
1727 // If we didn't consume this entry, add it to the end of the Dest list.
1728 if (!!Scale) {
1729 VariableGEPIndex Entry = {V, ZExtBits, SExtBits,
1730 -Scale, Src[i].CxtI, Src[i].IsNSW};
1731 Dest.push_back(Entry);
1732 }
1733 }
1734}
1735
/// Heuristic for GEPs whose two variable indices cancel except for a constant
/// offset: proves NoAlias when both accesses fit inside the minimum possible
/// distance between the pointers, taking wrapping into account.
bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  // Only the exact shape of two variable indices with precise sizes is handled.
  if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  // The two indices must cancel: identical extensions and type, opposite
  // scales.
  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
  // The decompositions must agree on everything but the constant offset.
  if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
      E0.Val.SExtBits != E1.Val.SExtBits ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
    MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}
1785
1786//===----------------------------------------------------------------------===//
1787// BasicAliasAnalysis Pass
1788//===----------------------------------------------------------------------===//
1789
1790AnalysisKey BasicAA::Key;
1791
1792BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1793 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1794 auto &AC = AM.getResult<AssumptionAnalysis>(F);
1795 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1796 auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1797 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1798}
1799
// Legacy-PM constructor: ensures the pass (and its dependencies) are
// registered with the global PassRegistry before use.
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}
1803
1804char BasicAAWrapperPass::ID = 0;
1805
// Out-of-line virtual method to anchor the class's vtable to this file
// (standard LLVM convention).
void BasicAAWrapperPass::anchor() {}
1807
1808INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",static void *initializeBasicAAWrapperPassPassOnce(PassRegistry
&Registry) {
1809 "Basic Alias Analysis (stateless AA impl)", true, true)static void *initializeBasicAAWrapperPassPassOnce(PassRegistry
&Registry) {
1810INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)initializeAssumptionCacheTrackerPass(Registry);
1811INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry);
1812INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
1813INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)initializePhiValuesWrapperPassPass(Registry);
1814INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",PassInfo *PI = new PassInfo( "Basic Alias Analysis (stateless AA impl)"
, "basic-aa", &BasicAAWrapperPass::ID, PassInfo::NormalCtor_t
(callDefaultCtor<BasicAAWrapperPass>), true, true); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeBasicAAWrapperPassPassFlag; void llvm::initializeBasicAAWrapperPassPass
(PassRegistry &Registry) { llvm::call_once(InitializeBasicAAWrapperPassPassFlag
, initializeBasicAAWrapperPassPassOnce, std::ref(Registry)); }
1815 "Basic Alias Analysis (stateless AA impl)", true, true)PassInfo *PI = new PassInfo( "Basic Alias Analysis (stateless AA impl)"
, "basic-aa", &BasicAAWrapperPass::ID, PassInfo::NormalCtor_t
(callDefaultCtor<BasicAAWrapperPass>), true, true); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeBasicAAWrapperPassPassFlag; void llvm::initializeBasicAAWrapperPassPass
(PassRegistry &Registry) { llvm::call_once(InitializeBasicAAWrapperPassPassFlag
, initializeBasicAAWrapperPassPassOnce, std::ref(Registry)); }
1816
// Factory for the legacy wrapper pass; caller takes ownership of the
// returned pass (the legacy pass manager deletes its passes).
FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
1820
1821bool BasicAAWrapperPass::runOnFunction(Function &F) {
1822 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1823 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1824 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1825 auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1826
1827 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1828 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1829 &DTWP.getDomTree(),
1830 PVWP ? &PVWP->getResult() : nullptr));
1831
1832 return false;
1833}
1834
// Declares this pass's analysis dependencies to the legacy pass manager.
// BasicAA itself preserves all analyses; PhiValues is merely used when
// available, never required.
void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}
1842
1843BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1844 return BasicAAResult(
1845 F.getParent()->getDataLayout(), F,
1846 P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1847 P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1848}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/APInt.h

1//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements a class to represent arbitrary precision
11/// integral constant values and operations on them.
12///
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ADT_APINT_H
16#define LLVM_ADT_APINT_H
17
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MathExtras.h"
20#include <cassert>
21#include <climits>
22#include <cstring>
23#include <utility>
24
25namespace llvm {
26class FoldingSetNodeID;
27class StringRef;
28class hash_code;
29class raw_ostream;
30
31template <typename T> class SmallVectorImpl;
32template <typename T> class ArrayRef;
33template <typename T> class Optional;
34template <typename T> struct DenseMapInfo;
35
36class APInt;
37
38inline APInt operator-(APInt);
39
40//===----------------------------------------------------------------------===//
41// APInt Class
42//===----------------------------------------------------------------------===//
43
44/// Class for arbitrary precision integers.
45///
46/// APInt is a functional replacement for common case unsigned integer type like
47/// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width
48/// integer sizes and large integer value types such as 3-bits, 15-bits, or more
49/// than 64-bits of precision. APInt provides a variety of arithmetic operators
50/// and methods to manipulate integer values of any bit-width. It supports both
51/// the typical integer arithmetic and comparison operations as well as bitwise
52/// manipulation.
53///
54/// The class has several invariants worth noting:
55/// * All bit, byte, and word positions are zero-based.
56/// * Once the bit width is set, it doesn't change except by the Truncate,
57/// SignExtend, or ZeroExtend operations.
58/// * All binary operators must be on APInt instances of the same bit width.
59/// Attempting to use these operators on instances with different bit
60/// widths will yield an assertion.
61/// * The value is stored canonically as an unsigned value. For operations
62/// where it makes a difference, there are both signed and unsigned variants
63/// of the operation. For example, sdiv and udiv. However, because the bit
64/// widths must be the same, operations such as Mul and Add produce the same
65/// results regardless of whether the values are interpreted as signed or
66/// not.
67/// * In general, the class tries to follow the style of computation that LLVM
68/// uses in its IR. This simplifies its use for LLVM.
69///
70class LLVM_NODISCARD[[clang::warn_unused_result]] APInt {
71public:
72 typedef uint64_t WordType;
73
74 /// This enum is used to hold the constants we needed for APInt.
75 enum : unsigned {
76 /// Byte size of a word.
77 APINT_WORD_SIZE = sizeof(WordType),
78 /// Bits in a word.
79 APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT8
80 };
81
82 enum class Rounding {
83 DOWN,
84 TOWARD_ZERO,
85 UP,
86 };
87
88 static constexpr WordType WORDTYPE_MAX = ~WordType(0);
89
90private:
91 /// This union is used to store the integer value. When the
92 /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
93 union {
94 uint64_t VAL; ///< Used to store the <= 64 bits integer value.
95 uint64_t *pVal; ///< Used to store the >64 bits integer value.
96 } U;
97
98 unsigned BitWidth; ///< The number of bits in this APInt.
99
100 friend struct DenseMapInfo<APInt>;
101
102 friend class APSInt;
103
  /// Fast internal constructor
  ///
  /// This constructor is used only internally for speed of construction of
  /// temporaries. It is unsafe for general use so it is not public. It adopts
  /// \p val as the multi-word storage without copying and does not clear
  /// unused high bits.
  APInt(uint64_t *val, unsigned bits) : BitWidth(bits) {
    U.pVal = val;
  }
111
  /// Determine which word a bit is in.
  ///
  /// \returns the word position for the specified bit position, i.e. the
  /// zero-based index into pVal for widths above 64 bits.
  static unsigned whichWord(unsigned bitPosition) {
    return bitPosition / APINT_BITS_PER_WORD;
  }
118
  /// Determine which bit in a word a bit is in.
  ///
  /// \returns the bit position in a word for the specified bit position
  /// in the APInt; always in [0, APINT_BITS_PER_WORD).
  static unsigned whichBit(unsigned bitPosition) {
    return bitPosition % APINT_BITS_PER_WORD;
  }
126
  /// Get a single bit mask.
  ///
  /// \returns a uint64_t with only bit at "whichBit(bitPosition)" set
  /// This method generates and returns a uint64_t (word) mask for a single
  /// bit at a specific bit position. This is used to mask the bit in the
  /// corresponding word. The shift amount is always < 64 because whichBit
  /// reduces modulo APINT_BITS_PER_WORD, so the shift cannot overflow.
  static uint64_t maskBit(unsigned bitPosition) {
    return 1ULL << whichBit(bitPosition);
  }
136
  /// Clear unused high order bits
  ///
  /// This method is used internally to clear the top "N" bits in the high order
  /// word that are not used by the APInt. This is needed after the most
  /// significant word is assigned a value to ensure that those bits are
  /// zero'd out.
  /// \returns *this to allow chaining.
  APInt &clearUnusedBits() {
    // Compute how many bits are used in the final word; always in [1, 64]
    // for a valid (non-zero) BitWidth.
    unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;

    // Mask out the high bits.
    uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits);
    if (isSingleWord())
      U.VAL &= mask;
    else
      U.pVal[getNumWords() - 1] &= mask;
    return *this;
  }
155
156 /// Get the word corresponding to a bit position
157 /// \returns the corresponding word for the specified bit position.
158 uint64_t getWord(unsigned bitPosition) const {
159 return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)];
160 }
161
162 /// Utility method to change the bit width of this APInt to new bit width,
163 /// allocating and/or deallocating as necessary. There is no guarantee on the
164 /// value of any bits upon return. Caller should populate the bits after.
165 void reallocate(unsigned NewBitWidth);
166
167 /// Convert a char array into an APInt
168 ///
169 /// \param radix 2, 8, 10, 16, or 36
170 /// Converts a string into a number. The string must be non-empty
171 /// and well-formed as a number of the given base. The bit-width
172 /// must be sufficient to hold the result.
173 ///
174 /// This is used by the constructors that take string arguments.
175 ///
176 /// StringRef::getAsInteger is superficially similar but (1) does
177 /// not assume that the string is well-formed and (2) grows the
178 /// result to hold the input.
179 void fromString(unsigned numBits, StringRef str, uint8_t radix);
180
181 /// An internal division function for dividing APInts.
182 ///
183 /// This is used by the toString method to divide by the radix. It simply
184 /// provides a more convenient form of divide for internal use since KnuthDiv
185 /// has specific constraints on its inputs. If those constraints are not met
186 /// then it provides a simpler form of divide.
187 static void divide(const WordType *LHS, unsigned lhsWords,
188 const WordType *RHS, unsigned rhsWords, WordType *Quotient,
189 WordType *Remainder);
190
191 /// out-of-line slow case for inline constructor
192 void initSlowCase(uint64_t val, bool isSigned);
193
194 /// shared code between two array constructors
195 void initFromArray(ArrayRef<uint64_t> array);
196
197 /// out-of-line slow case for inline copy constructor
198 void initSlowCase(const APInt &that);
199
200 /// out-of-line slow case for shl
201 void shlSlowCase(unsigned ShiftAmt);
202
203 /// out-of-line slow case for lshr.
204 void lshrSlowCase(unsigned ShiftAmt);
205
206 /// out-of-line slow case for ashr.
207 void ashrSlowCase(unsigned ShiftAmt);
208
209 /// out-of-line slow case for operator=
210 void AssignSlowCase(const APInt &RHS);
211
212 /// out-of-line slow case for operator==
213 bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY__attribute__((__pure__));
214
215 /// out-of-line slow case for countLeadingZeros
216 unsigned countLeadingZerosSlowCase() const LLVM_READONLY__attribute__((__pure__));
217
218 /// out-of-line slow case for countLeadingOnes.
219 unsigned countLeadingOnesSlowCase() const LLVM_READONLY__attribute__((__pure__));
220
221 /// out-of-line slow case for countTrailingZeros.
222 unsigned countTrailingZerosSlowCase() const LLVM_READONLY__attribute__((__pure__));
223
224 /// out-of-line slow case for countTrailingOnes
225 unsigned countTrailingOnesSlowCase() const LLVM_READONLY__attribute__((__pure__));
226
227 /// out-of-line slow case for countPopulation
228 unsigned countPopulationSlowCase() const LLVM_READONLY__attribute__((__pure__));
229
230 /// out-of-line slow case for intersects.
231 bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY__attribute__((__pure__));
232
233 /// out-of-line slow case for isSubsetOf.
234 bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY__attribute__((__pure__));
235
236 /// out-of-line slow case for setBits.
237 void setBitsSlowCase(unsigned loBit, unsigned hiBit);
238
239 /// out-of-line slow case for flipAllBits.
240 void flipAllBitsSlowCase();
241
242 /// out-of-line slow case for operator&=.
243 void AndAssignSlowCase(const APInt& RHS);
244
245 /// out-of-line slow case for operator|=.
246 void OrAssignSlowCase(const APInt& RHS);
247
248 /// out-of-line slow case for operator^=.
249 void XorAssignSlowCase(const APInt& RHS);
250
251 /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
252 /// to, or greater than RHS.
253 int compare(const APInt &RHS) const LLVM_READONLY__attribute__((__pure__));
254
255 /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
256 /// to, or greater than RHS.
257 int compareSigned(const APInt &RHS) const LLVM_READONLY__attribute__((__pure__));
258
259public:
260 /// \name Constructors
261 /// @{
262
  /// Create a new APInt of numBits width, initialized as val.
  ///
  /// If isSigned is true then val is treated as if it were a signed value
  /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
  /// will be done. Otherwise, no sign extension occurs (high order bits beyond
  /// the range of val are zero filled).
  ///
  /// \param numBits the bit width of the constructed APInt; must be non-zero
  /// \param val the initial value of the APInt
  /// \param isSigned how to treat signedness of val
  APInt(unsigned numBits, uint64_t val, bool isSigned = false)
      : BitWidth(numBits) {
    assert(BitWidth && "bitwidth too small");
    if (isSingleWord()) {
      // <= 64 bits: store inline and trim bits beyond the width.
      U.VAL = val;
      clearUnusedBits();
    } else {
      // > 64 bits: out-of-line path allocates the word array.
      initSlowCase(val, isSigned);
    }
  }
283
284 /// Construct an APInt of numBits width, initialized as bigVal[].
285 ///
286 /// Note that bigVal.size() can be smaller or larger than the corresponding
287 /// bit width but any extraneous bits will be dropped.
288 ///
289 /// \param numBits the bit width of the constructed APInt
290 /// \param bigVal a sequence of words to form the initial value of the APInt
291 APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
292
293 /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
294 /// deprecated because this constructor is prone to ambiguity with the
295 /// APInt(unsigned, uint64_t, bool) constructor.
296 ///
297 /// If this overload is ever deleted, care should be taken to prevent calls
298 /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
299 /// constructor.
300 APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
301
302 /// Construct an APInt from a string representation.
303 ///
304 /// This constructor interprets the string \p str in the given radix. The
305 /// interpretation stops when the first character that is not suitable for the
306 /// radix is encountered, or the end of the string. Acceptable radix values
307 /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
308 /// string to require more bits than numBits.
309 ///
310 /// \param numBits the bit width of the constructed APInt
311 /// \param str the string to be interpreted
312 /// \param radix the radix to use for the conversion
313 APInt(unsigned numBits, StringRef str, uint8_t radix);
314
  /// Simply makes *this a copy of that.
  /// Copy Constructor. Single-word values are copied inline; wider values
  /// take the out-of-line path, which allocates a fresh word array.
  APInt(const APInt &that) : BitWidth(that.BitWidth) {
    if (isSingleWord())
      U.VAL = that.U.VAL;
    else
      initSlowCase(that);
  }
323
  /// Move Constructor.
  APInt(APInt &&that) : BitWidth(that.BitWidth) {
    // Steal the representation wholesale; for multi-word values this takes
    // over ownership of that.U.pVal.
    memcpy(&U, &that.U, sizeof(U));
    // Hollow out 'that': BitWidth == 0 makes needsCleanup() report false, so
    // its destructor won't free the stolen buffer. NOTE(review): a moved-from
    // APInt breaks the BitWidth != 0 invariant asserted by the value
    // constructor; reading its value yields garbage (this is the state the
    // static analyzer's "assigned value is garbage" report traces through) —
    // callers must only destroy or assign to it.
    that.BitWidth = 0;
  }
329
  /// Destructor. Frees the word array only for multi-word values
  /// (needsCleanup()); single-word and moved-from instances own no memory.
  ~APInt() {
    if (needsCleanup())
      delete[] U.pVal;
  }
335
  /// Default constructor that creates an uninteresting APInt
  /// representing a 1-bit zero value.
  ///
  /// This is useful for object deserialization (pair this with the static
  /// method Read).
  explicit APInt() : BitWidth(1) { U.VAL = 0; }
342
  /// Returns whether this instance allocated memory (i.e. uses the multi-word
  /// pVal representation that the destructor must delete[]).
  bool needsCleanup() const { return !isSingleWord(); }
345
346 /// Used to insert APInt objects, or objects that contain APInt objects, into
347 /// FoldingSets.
348 void Profile(FoldingSetNodeID &id) const;
349
350 /// @}
351 /// \name Value Tests
352 /// @{
353
354 /// Determine if this APInt just has one word to store value.
355 ///
356 /// \returns true if the number of bits <= 64, false otherwise.
357 bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }
58
Assuming field 'BitWidth' is <= APINT_BITS_PER_WORD
59
Returning the value 1, which participates in a condition later
358
359 /// Determine sign of this APInt.
360 ///
361 /// This tests the high bit of this APInt to determine if it is set.
362 ///
363 /// \returns true if this APInt is negative, false otherwise
364 bool isNegative() const { return (*this)[BitWidth - 1]; }
365
366 /// Determine if this APInt Value is non-negative (>= 0)
367 ///
368 /// This tests the high bit of the APInt to determine if it is unset.
369 bool isNonNegative() const { return !isNegative(); }
370
371 /// Determine if sign bit of this APInt is set.
372 ///
373 /// This tests the high bit of this APInt to determine if it is set.
374 ///
375 /// \returns true if this APInt has its sign bit set, false otherwise.
376 bool isSignBitSet() const { return (*this)[BitWidth-1]; }
377
378 /// Determine if sign bit of this APInt is clear.
379 ///
380 /// This tests the high bit of this APInt to determine if it is clear.
381 ///
382 /// \returns true if this APInt has its sign bit clear, false otherwise.
383 bool isSignBitClear() const { return !isSignBitSet(); }
384
385 /// Determine if this APInt Value is positive.
386 ///
387 /// This tests if the value of this APInt is positive (> 0). Note
388 /// that 0 is not a positive value.
389 ///
390 /// \returns true if this APInt is positive.
391 bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
392
393 /// Determine if this APInt Value is non-positive (<= 0).
394 ///
395 /// \returns true if this APInt is non-positive.
396 bool isNonPositive() const { return !isStrictlyPositive(); }
397
398 /// Determine if all bits are set
399 ///
400 /// This checks to see if the value has all bits of the APInt are set or not.
401 bool isAllOnesValue() const {
402 if (isSingleWord())
403 return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
404 return countTrailingOnesSlowCase() == BitWidth;
405 }
406
407 /// Determine if all bits are clear
408 ///
409 /// This checks to see if the value has all bits of the APInt are clear or
410 /// not.
411 bool isNullValue() const { return !*this; }
412
413 /// Determine if this is a value of 1.
414 ///
415 /// This checks to see if the value of this APInt is one.
416 bool isOneValue() const {
417 if (isSingleWord())
418 return U.VAL == 1;
419 return countLeadingZerosSlowCase() == BitWidth - 1;
420 }
421
422 /// Determine if this is the largest unsigned value.
423 ///
424 /// This checks to see if the value of this APInt is the maximum unsigned
425 /// value for the APInt's bit width.
426 bool isMaxValue() const { return isAllOnesValue(); }
427
428 /// Determine if this is the largest signed value.
429 ///
430 /// This checks to see if the value of this APInt is the maximum signed
431 /// value for the APInt's bit width.
432 bool isMaxSignedValue() const {
433 if (isSingleWord())
434 return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
435 return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
436 }
437
438 /// Determine if this is the smallest unsigned value.
439 ///
440 /// This checks to see if the value of this APInt is the minimum unsigned
441 /// value for the APInt's bit width.
442 bool isMinValue() const { return isNullValue(); }
443
444 /// Determine if this is the smallest signed value.
445 ///
446 /// This checks to see if the value of this APInt is the minimum signed
447 /// value for the APInt's bit width.
448 bool isMinSignedValue() const {
449 if (isSingleWord())
450 return U.VAL == (WordType(1) << (BitWidth - 1));
451 return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
452 }
453
454 /// Check if this APInt has an N-bits unsigned integer value.
455 bool isIntN(unsigned N) const {
456 assert(N && "N == 0 ???")(static_cast<void> (0));
457 return getActiveBits() <= N;
458 }
459
460 /// Check if this APInt has an N-bits signed integer value.
461 bool isSignedIntN(unsigned N) const {
462 assert(N && "N == 0 ???")(static_cast<void> (0));
463 return getMinSignedBits() <= N;
464 }
465
466 /// Check if this APInt's value is a power of two greater than zero.
467 ///
468 /// \returns true if the argument APInt value is a power of two > 0.
469 bool isPowerOf2() const {
470 if (isSingleWord())
471 return isPowerOf2_64(U.VAL);
472 return countPopulationSlowCase() == 1;
473 }
474
475 /// Check if the APInt's value is returned by getSignMask.
476 ///
477 /// \returns true if this is the value returned by getSignMask.
478 bool isSignMask() const { return isMinSignedValue(); }
479
480 /// Convert APInt to a boolean value.
481 ///
482 /// This converts the APInt to a boolean value as a test against zero.
483 bool getBoolValue() const { return !!*this; }
484
485 /// If this value is smaller than the specified limit, return it, otherwise
486 /// return the limit value. This causes the value to saturate to the limit.
487 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) const {
488 return ugt(Limit) ? Limit : getZExtValue();
51
Assuming the condition is true
52
'?' condition is true
53
Returning the value 18446744073709551615
489 }
490
491 /// Check if the APInt consists of a repeated bit pattern.
492 ///
493 /// e.g. 0x01010101 satisfies isSplat(8).
494 /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
495 /// width without remainder.
496 bool isSplat(unsigned SplatSizeInBits) const;
497
498 /// \returns true if this APInt value is a sequence of \param numBits ones
499 /// starting at the least significant bit with the remainder zero.
500 bool isMask(unsigned numBits) const {
501 assert(numBits != 0 && "numBits must be non-zero")(static_cast<void> (0));
502 assert(numBits <= BitWidth && "numBits out of range")(static_cast<void> (0));
503 if (isSingleWord())
504 return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits));
505 unsigned Ones = countTrailingOnesSlowCase();
506 return (numBits == Ones) &&
507 ((Ones + countLeadingZerosSlowCase()) == BitWidth);
508 }
509
510 /// \returns true if this APInt is a non-empty sequence of ones starting at
511 /// the least significant bit with the remainder zero.
512 /// Ex. isMask(0x0000FFFFU) == true.
513 bool isMask() const {
514 if (isSingleWord())
515 return isMask_64(U.VAL);
516 unsigned Ones = countTrailingOnesSlowCase();
517 return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
518 }
519
520 /// Return true if this APInt value contains a sequence of ones with
521 /// the remainder zero.
522 bool isShiftedMask() const {
523 if (isSingleWord())
524 return isShiftedMask_64(U.VAL);
525 unsigned Ones = countPopulationSlowCase();
526 unsigned LeadZ = countLeadingZerosSlowCase();
527 return (Ones + LeadZ + countTrailingZeros()) == BitWidth;
528 }
529
530 /// @}
531 /// \name Value Generators
532 /// @{
533
534 /// Gets maximum unsigned value of APInt for specific bit width.
535 static APInt getMaxValue(unsigned numBits) {
536 return getAllOnesValue(numBits);
537 }
538
539 /// Gets maximum signed value of APInt for a specific bit width.
540 static APInt getSignedMaxValue(unsigned numBits) {
541 APInt API = getAllOnesValue(numBits);
542 API.clearBit(numBits - 1);
543 return API;
544 }
545
546 /// Gets minimum unsigned value of APInt for a specific bit width.
547 static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }
548
549 /// Gets minimum signed value of APInt for a specific bit width.
550 static APInt getSignedMinValue(unsigned numBits) {
551 APInt API(numBits, 0);
552 API.setBit(numBits - 1);
553 return API;
554 }
555
556 /// Get the SignMask for a specific bit width.
557 ///
558 /// This is just a wrapper function of getSignedMinValue(), and it helps code
559 /// readability when we want to get a SignMask.
560 static APInt getSignMask(unsigned BitWidth) {
561 return getSignedMinValue(BitWidth);
562 }
563
564 /// Get the all-ones value.
565 ///
566 /// \returns the all-ones value for an APInt of the specified bit-width.
567 static APInt getAllOnesValue(unsigned numBits) {
568 return APInt(numBits, WORDTYPE_MAX, true);
569 }
570
571 /// Get the '0' value.
572 ///
573 /// \returns the '0' value for an APInt of the specified bit-width.
574 static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }
575
576 /// Compute an APInt containing numBits highbits from this APInt.
577 ///
578 /// Get an APInt with the same BitWidth as this APInt, just zero mask
579 /// the low bits and right shift to the least significant bit.
580 ///
581 /// \returns the high "numBits" bits of this APInt.
582 APInt getHiBits(unsigned numBits) const;
583
584 /// Compute an APInt containing numBits lowbits from this APInt.
585 ///
586 /// Get an APInt with the same BitWidth as this APInt, just zero mask
587 /// the high bits.
588 ///
589 /// \returns the low "numBits" bits of this APInt.
590 APInt getLoBits(unsigned numBits) const;
591
592 /// Return an APInt with exactly one bit set in the result.
593 static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
594 APInt Res(numBits, 0);
595 Res.setBit(BitNo);
596 return Res;
597 }
598
599 /// Get a value with a block of bits set.
600 ///
601 /// Constructs an APInt value that has a contiguous range of bits set. The
602 /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
603 /// bits will be zero. For example, with parameters(32, 0, 16) you would get
604 /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than
605 /// \p hiBit.
606 ///
607 /// \param numBits the intended bit width of the result
608 /// \param loBit the index of the lowest bit set.
609 /// \param hiBit the index of the highest bit set.
610 ///
611 /// \returns An APInt value with the requested bits set.
612 static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
613 assert(loBit <= hiBit && "loBit greater than hiBit")(static_cast<void> (0));
614 APInt Res(numBits, 0);
615 Res.setBits(loBit, hiBit);
616 return Res;
617 }
618
619 /// Wrap version of getBitsSet.
620 /// If \p hiBit is bigger than \p loBit, this is same with getBitsSet.
621 /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example,
622 /// with parameters (32, 28, 4), you would get 0xF000000F.
623 /// If \p hiBit is equal to \p loBit, you would get a result with all bits
624 /// set.
625 static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit,
626 unsigned hiBit) {
627 APInt Res(numBits, 0);
628 Res.setBitsWithWrap(loBit, hiBit);
629 return Res;
630 }
631
632 /// Get a value with upper bits starting at loBit set.
633 ///
634 /// Constructs an APInt value that has a contiguous range of bits set. The
635 /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other
636 /// bits will be zero. For example, with parameters(32, 12) you would get
637 /// 0xFFFFF000.
638 ///
639 /// \param numBits the intended bit width of the result
640 /// \param loBit the index of the lowest bit to set.
641 ///
642 /// \returns An APInt value with the requested bits set.
643 static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) {
644 APInt Res(numBits, 0);
645 Res.setBitsFrom(loBit);
646 return Res;
647 }
648
649 /// Get a value with high bits set
650 ///
651 /// Constructs an APInt value that has the top hiBitsSet bits set.
652 ///
653 /// \param numBits the bitwidth of the result
654 /// \param hiBitsSet the number of high-order bits set in the result.
655 static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
656 APInt Res(numBits, 0);
657 Res.setHighBits(hiBitsSet);
658 return Res;
659 }
660
661 /// Get a value with low bits set
662 ///
663 /// Constructs an APInt value that has the bottom loBitsSet bits set.
664 ///
665 /// \param numBits the bitwidth of the result
666 /// \param loBitsSet the number of low-order bits set in the result.
667 static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
668 APInt Res(numBits, 0);
669 Res.setLowBits(loBitsSet);
670 return Res;
671 }
672
673 /// Return a value containing V broadcasted over NewLen bits.
674 static APInt getSplat(unsigned NewLen, const APInt &V);
675
676 /// Determine if two APInts have the same value, after zero-extending
677 /// one of them (if needed!) to ensure that the bit-widths match.
678 static bool isSameValue(const APInt &I1, const APInt &I2) {
679 if (I1.getBitWidth() == I2.getBitWidth())
680 return I1 == I2;
681
682 if (I1.getBitWidth() > I2.getBitWidth())
683 return I1 == I2.zext(I1.getBitWidth());
684
685 return I1.zext(I2.getBitWidth()) == I2;
686 }
687
688 /// Overload to compute a hash_code for an APInt value.
689 friend hash_code hash_value(const APInt &Arg);
690
691 /// This function returns a pointer to the internal storage of the APInt.
692 /// This is useful for writing out the APInt in binary form without any
693 /// conversions.
694 const uint64_t *getRawData() const {
695 if (isSingleWord())
696 return &U.VAL;
697 return &U.pVal[0];
698 }
699
700 /// @}
701 /// \name Unary Operators
702 /// @{
703
704 /// Postfix increment operator.
705 ///
706 /// Increments *this by 1.
707 ///
708 /// \returns a new APInt value representing the original value of *this.
709 APInt operator++(int) {
710 APInt API(*this);
711 ++(*this);
712 return API;
713 }
714
715 /// Prefix increment operator.
716 ///
717 /// \returns *this incremented by one
718 APInt &operator++();
719
720 /// Postfix decrement operator.
721 ///
722 /// Decrements *this by 1.
723 ///
724 /// \returns a new APInt value representing the original value of *this.
725 APInt operator--(int) {
726 APInt API(*this);
727 --(*this);
728 return API;
729 }
730
731 /// Prefix decrement operator.
732 ///
733 /// \returns *this decremented by one.
734 APInt &operator--();
735
736 /// Logical negation operator.
737 ///
738 /// Performs logical negation operation on this APInt.
739 ///
740 /// \returns true if *this is zero, false otherwise.
741 bool operator!() const {
742 if (isSingleWord())
743 return U.VAL == 0;
744 return countLeadingZerosSlowCase() == BitWidth;
745 }
746
747 /// @}
748 /// \name Assignment Operators
749 /// @{
750
751 /// Copy assignment operator.
752 ///
753 /// \returns *this after assignment of RHS.
754 APInt &operator=(const APInt &RHS) {
755 // If the bitwidths are the same, we can avoid mucking with memory
756 if (isSingleWord() && RHS.isSingleWord()) {
757 U.VAL = RHS.U.VAL;
758 BitWidth = RHS.BitWidth;
759 return clearUnusedBits();
760 }
761
762 AssignSlowCase(RHS);
763 return *this;
764 }
765
766 /// Move assignment operator.
767 APInt &operator=(APInt &&that) {
768#ifdef EXPENSIVE_CHECKS
769 // Some std::shuffle implementations still do self-assignment.
770 if (this == &that)
771 return *this;
772#endif
773 assert(this != &that && "Self-move not supported")(static_cast<void> (0));
774 if (!isSingleWord())
775 delete[] U.pVal;
776
777 // Use memcpy so that type based alias analysis sees both VAL and pVal
778 // as modified.
779 memcpy(&U, &that.U, sizeof(U));
780
781 BitWidth = that.BitWidth;
782 that.BitWidth = 0;
783
784 return *this;
785 }
786
787 /// Assignment operator.
788 ///
789 /// The RHS value is assigned to *this. If the significant bits in RHS exceed
790 /// the bit width, the excess bits are truncated. If the bit width is larger
791 /// than 64, the value is zero filled in the unspecified high order bits.
792 ///
793 /// \returns *this after assignment of RHS value.
794 APInt &operator=(uint64_t RHS) {
795 if (isSingleWord()) {
796 U.VAL = RHS;
797 return clearUnusedBits();
798 }
799 U.pVal[0] = RHS;
800 memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
801 return *this;
802 }
803
804 /// Bitwise AND assignment operator.
805 ///
806 /// Performs a bitwise AND operation on this APInt and RHS. The result is
807 /// assigned to *this.
808 ///
809 /// \returns *this after ANDing with RHS.
810 APInt &operator&=(const APInt &RHS) {
811 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same")(static_cast<void> (0));
812 if (isSingleWord())
813 U.VAL &= RHS.U.VAL;
814 else
815 AndAssignSlowCase(RHS);
816 return *this;
817 }
818
819 /// Bitwise AND assignment operator.
820 ///
821 /// Performs a bitwise AND operation on this APInt and RHS. RHS is
822 /// logically zero-extended or truncated to match the bit-width of
823 /// the LHS.
824 APInt &operator&=(uint64_t RHS) {
825 if (isSingleWord()) {
826 U.VAL &= RHS;
827 return *this;
828 }
829 U.pVal[0] &= RHS;
830 memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
831 return *this;
832 }
833
834 /// Bitwise OR assignment operator.
835 ///
836 /// Performs a bitwise OR operation on this APInt and RHS. The result is
837 /// assigned *this;
838 ///
839 /// \returns *this after ORing with RHS.
840 APInt &operator|=(const APInt &RHS) {
841 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same")(static_cast<void> (0));
842 if (isSingleWord())
843 U.VAL |= RHS.U.VAL;
844 else
845 OrAssignSlowCase(RHS);
846 return *this;
847 }
848
849 /// Bitwise OR assignment operator.
850 ///
851 /// Performs a bitwise OR operation on this APInt and RHS. RHS is
852 /// logically zero-extended or truncated to match the bit-width of
853 /// the LHS.
854 APInt &operator|=(uint64_t RHS) {
855 if (isSingleWord()) {
856 U.VAL |= RHS;
857 return clearUnusedBits();
858 }
859 U.pVal[0] |= RHS;
860 return *this;
861 }
862
863 /// Bitwise XOR assignment operator.
864 ///
865 /// Performs a bitwise XOR operation on this APInt and RHS. The result is
866 /// assigned to *this.
867 ///
868 /// \returns *this after XORing with RHS.
869 APInt &operator^=(const APInt &RHS) {
870 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same")(static_cast<void> (0));
871 if (isSingleWord())
872 U.VAL ^= RHS.U.VAL;
873 else
874 XorAssignSlowCase(RHS);
875 return *this;
876 }
877
878 /// Bitwise XOR assignment operator.
879 ///
880 /// Performs a bitwise XOR operation on this APInt and RHS. RHS is
881 /// logically zero-extended or truncated to match the bit-width of
882 /// the LHS.
883 APInt &operator^=(uint64_t RHS) {
884 if (isSingleWord()) {
885 U.VAL ^= RHS;
886 return clearUnusedBits();
887 }
888 U.pVal[0] ^= RHS;
889 return *this;
890 }
891
892 /// Multiplication assignment operator.
893 ///
894 /// Multiplies this APInt by RHS and assigns the result to *this.
895 ///
896 /// \returns *this
897 APInt &operator*=(const APInt &RHS);
898 APInt &operator*=(uint64_t RHS);
899
900 /// Addition assignment operator.
901 ///
902 /// Adds RHS to *this and assigns the result to *this.
903 ///
904 /// \returns *this
905 APInt &operator+=(const APInt &RHS);
906 APInt &operator+=(uint64_t RHS);
907
908 /// Subtraction assignment operator.
909 ///
910 /// Subtracts RHS from *this and assigns the result to *this.
911 ///
912 /// \returns *this
913 APInt &operator-=(const APInt &RHS);
914 APInt &operator-=(uint64_t RHS);
915
916 /// Left-shift assignment function.
917 ///
918 /// Shifts *this left by shiftAmt and assigns the result to *this.
919 ///
920 /// \returns *this after shifting left by ShiftAmt
921 APInt &operator<<=(unsigned ShiftAmt) {
922 assert(ShiftAmt <= BitWidth && "Invalid shift amount")(static_cast<void> (0));
923 if (isSingleWord()) {
57
Calling 'APInt::isSingleWord'
60
Returning from 'APInt::isSingleWord'
61
Taking true branch
924 if (ShiftAmt
61.1
'ShiftAmt' is not equal to field 'BitWidth'
61.1
'ShiftAmt' is not equal to field 'BitWidth'
== BitWidth)
62
Taking false branch
925 U.VAL = 0;
926 else
927 U.VAL <<= ShiftAmt;
63
Assigned value is garbage or undefined
928 return clearUnusedBits();
929 }
930 shlSlowCase(ShiftAmt);
931 return *this;
932 }
933
934 /// Left-shift assignment function.
935 ///
936 /// Shifts *this left by shiftAmt and assigns the result to *this.
937 ///
938 /// \returns *this after shifting left by ShiftAmt
939 APInt &operator<<=(const APInt &ShiftAmt);
940
941 /// @}
942 /// \name Binary Operators
943 /// @{
944
945 /// Multiplication operator.
946 ///
947 /// Multiplies this APInt by RHS and returns the result.
948 APInt operator*(const APInt &RHS) const;
949
950 /// Left logical shift operator.
951 ///
952 /// Shifts this APInt left by \p Bits and returns the result.
953 APInt operator<<(unsigned Bits) const { return shl(Bits); }
954
955 /// Left logical shift operator.
956 ///
957 /// Shifts this APInt left by \p Bits and returns the result.
958 APInt operator<<(const APInt &Bits) const { return shl(Bits); }
959
960 /// Arithmetic right-shift function.
961 ///
962 /// Arithmetic right-shift this APInt by shiftAmt.
963 APInt ashr(unsigned ShiftAmt) const {
964 APInt R(*this);
965 R.ashrInPlace(ShiftAmt);
966 return R;
967 }
968
969 /// Arithmetic right-shift this APInt by ShiftAmt in place.
970 void ashrInPlace(unsigned ShiftAmt) {
971 assert(ShiftAmt <= BitWidth && "Invalid shift amount")(static_cast<void> (0));
972 if (isSingleWord()) {
973 int64_t SExtVAL = SignExtend64(U.VAL, BitWidth);
974 if (ShiftAmt == BitWidth)
975 U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit.
976 else
977 U.VAL = SExtVAL >> ShiftAmt;
978 clearUnusedBits();
979 return;
980 }
981 ashrSlowCase(ShiftAmt);
982 }
983
984 /// Logical right-shift function.
985 ///
986 /// Logical right-shift this APInt by shiftAmt.
987 APInt lshr(unsigned shiftAmt) const {
988 APInt R(*this);
989 R.lshrInPlace(shiftAmt);
990 return R;
991 }
992
993 /// Logical right-shift this APInt by ShiftAmt in place.
994 void lshrInPlace(unsigned ShiftAmt) {
995 assert(ShiftAmt <= BitWidth && "Invalid shift amount")(static_cast<void> (0));
996 if (isSingleWord()) {
997 if (ShiftAmt == BitWidth)
998 U.VAL = 0;
999 else
1000 U.VAL >>= ShiftAmt;
1001 return;
1002 }
1003 lshrSlowCase(ShiftAmt);
1004 }
1005
1006 /// Left-shift function.
1007 ///
1008 /// Left-shift this APInt by shiftAmt.
1009 APInt shl(unsigned shiftAmt) const {
1010 APInt R(*this);
1011 R <<= shiftAmt;
1012 return R;
1013 }
1014
1015 /// Rotate left by rotateAmt.
1016 APInt rotl(unsigned rotateAmt) const;
1017
1018 /// Rotate right by rotateAmt.
1019 APInt rotr(unsigned rotateAmt) const;
1020
1021 /// Arithmetic right-shift function.
1022 ///
1023 /// Arithmetic right-shift this APInt by shiftAmt.
1024 APInt ashr(const APInt &ShiftAmt) const {
1025 APInt R(*this);
1026 R.ashrInPlace(ShiftAmt);
1027 return R;
1028 }
1029
1030 /// Arithmetic right-shift this APInt by shiftAmt in place.
1031 void ashrInPlace(const APInt &shiftAmt);
1032
1033 /// Logical right-shift function.
1034 ///
1035 /// Logical right-shift this APInt by shiftAmt.
1036 APInt lshr(const APInt &ShiftAmt) const {
1037 APInt R(*this);
1038 R.lshrInPlace(ShiftAmt);
1039 return R;
1040 }
1041
1042 /// Logical right-shift this APInt by ShiftAmt in place.
1043 void lshrInPlace(const APInt &ShiftAmt);
1044
1045 /// Left-shift function.
1046 ///
1047 /// Left-shift this APInt by shiftAmt.
1048 APInt shl(const APInt &ShiftAmt) const {
1049 APInt R(*this);
1050 R <<= ShiftAmt;
1051 return R;
1052 }
1053
1054 /// Rotate left by rotateAmt.
1055 APInt rotl(const APInt &rotateAmt) const;
1056
1057 /// Rotate right by rotateAmt.
1058 APInt rotr(const APInt &rotateAmt) const;
1059
1060 /// Unsigned division operation.
1061 ///
1062 /// Perform an unsigned divide operation on this APInt by RHS. Both this and
1063 /// RHS are treated as unsigned quantities for purposes of this division.
1064 ///
1065 /// \returns a new APInt value containing the division result, rounded towards
1066 /// zero.
1067 APInt udiv(const APInt &RHS) const;
1068 APInt udiv(uint64_t RHS) const;
1069
1070 /// Signed division function for APInt.
1071 ///
1072 /// Signed divide this APInt by APInt RHS.
1073 ///
1074 /// The result is rounded towards zero.
1075 APInt sdiv(const APInt &RHS) const;
1076 APInt sdiv(int64_t RHS) const;
1077
1078 /// Unsigned remainder operation.
1079 ///
1080 /// Perform an unsigned remainder operation on this APInt with RHS being the
1081 /// divisor. Both this and RHS are treated as unsigned quantities for purposes
1082 /// of this operation. Note that this is a true remainder operation and not a
1083 /// modulo operation because the sign follows the sign of the dividend which
1084 /// is *this.
1085 ///
1086 /// \returns a new APInt value containing the remainder result
1087 APInt urem(const APInt &RHS) const;
1088 uint64_t urem(uint64_t RHS) const;
1089
1090 /// Function for signed remainder operation.
1091 ///
1092 /// Signed remainder operation on APInt.
1093 APInt srem(const APInt &RHS) const;
1094 int64_t srem(int64_t RHS) const;
1095
1096 /// Dual division/remainder interface.
1097 ///
1098 /// Sometimes it is convenient to divide two APInt values and obtain both the
1099 /// quotient and remainder. This function does both operations in the same
1100 /// computation making it a little more efficient. The pair of input arguments
1101 /// may overlap with the pair of output arguments. It is safe to call
1102 /// udivrem(X, Y, X, Y), for example.
1103 static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
1104 APInt &Remainder);
1105 static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
1106 uint64_t &Remainder);
1107
1108 static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
1109 APInt &Remainder);
1110 static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
1111 int64_t &Remainder);
1112
1113 // Operations that return overflow indicators.
1114 APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
1115 APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
1116 APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
1117 APInt usub_ov(const APInt &RHS, bool &Overflow) const;
1118 APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
1119 APInt smul_ov(const APInt &RHS, bool &Overflow) const;
1120 APInt umul_ov(const APInt &RHS, bool &Overflow) const;
1121 APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
1122 APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
1123
1124 // Operations that saturate
1125 APInt sadd_sat(const APInt &RHS) const;
1126 APInt uadd_sat(const APInt &RHS) const;
1127 APInt ssub_sat(const APInt &RHS) const;
1128 APInt usub_sat(const APInt &RHS) const;
1129 APInt smul_sat(const APInt &RHS) const;
1130 APInt umul_sat(const APInt &RHS) const;
1131 APInt sshl_sat(const APInt &RHS) const;
1132 APInt ushl_sat(const APInt &RHS) const;
1133
1134 /// Array-indexing support.
1135 ///
1136 /// \returns the bit value at bitPosition
1137 bool operator[](unsigned bitPosition) const {
1138 assert(bitPosition < getBitWidth() && "Bit position out of bounds!")(static_cast<void> (0));
1139 return (maskBit(bitPosition) & getWord(bitPosition)) != 0;
1140 }
1141
1142 /// @}
1143 /// \name Comparison Operators
1144 /// @{
1145
1146 /// Equality operator.
1147 ///
1148 /// Compares this APInt with RHS for the validity of the equality
1149 /// relationship.
1150 bool operator==(const APInt &RHS) const {
1151 assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths")(static_cast<void> (0));
1152 if (isSingleWord())
1153 return U.VAL == RHS.U.VAL;
1154 return EqualSlowCase(RHS);
1155 }
1156
1157 /// Equality operator.
1158 ///
1159 /// Compares this APInt with a uint64_t for the validity of the equality
1160 /// relationship.
1161 ///
1162 /// \returns true if *this == Val
1163 bool operator==(uint64_t Val) const {
1164 return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
1165 }
1166
1167 /// Equality comparison.
1168 ///
1169 /// Compares this APInt with RHS for the validity of the equality
1170 /// relationship.
1171 ///
1172 /// \returns true if *this == Val
1173 bool eq(const APInt &RHS) const { return (*this) == RHS; }
1174
1175 /// Inequality operator.
1176 ///
1177 /// Compares this APInt with RHS for the validity of the inequality
1178 /// relationship.
1179 ///
1180 /// \returns true if *this != Val
1181 bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }
1182
1183 /// Inequality operator.
1184 ///
1185 /// Compares this APInt with a uint64_t for the validity of the inequality
1186 /// relationship.
1187 ///
1188 /// \returns true if *this != Val
1189 bool operator!=(uint64_t Val) const { return !((*this) == Val); }
1190
1191 /// Inequality comparison
1192 ///
1193 /// Compares this APInt with RHS for the validity of the inequality
1194 /// relationship.
1195 ///
1196 /// \returns true if *this != Val
1197 bool ne(const APInt &RHS) const { return !((*this) == RHS); }
1198
1199 /// Unsigned less than comparison
1200 ///
1201 /// Regards both *this and RHS as unsigned quantities and compares them for
1202 /// the validity of the less-than relationship.
1203 ///
1204 /// \returns true if *this < RHS when both are considered unsigned.
1205 bool ult(const APInt &RHS) const { return compare(RHS) < 0; }
1206
1207 /// Unsigned less than comparison
1208 ///
1209 /// Regards both *this as an unsigned quantity and compares it with RHS for
1210 /// the validity of the less-than relationship.
1211 ///
1212 /// \returns true if *this < RHS when considered unsigned.
1213 bool ult(uint64_t RHS) const {
1214 // Only need to check active bits if not a single word.
1215 return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
1216 }
1217
1218 /// Signed less than comparison
1219 ///
1220 /// Regards both *this and RHS as signed quantities and compares them for
1221 /// validity of the less-than relationship.
1222 ///
1223 /// \returns true if *this < RHS when both are considered signed.
1224 bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }
1225
1226 /// Signed less than comparison
1227 ///
1228 /// Regards both *this as a signed quantity and compares it with RHS for
1229 /// the validity of the less-than relationship.
1230 ///
1231 /// \returns true if *this < RHS when considered signed.
1232 bool slt(int64_t RHS) const {
1233 return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative()
1234 : getSExtValue() < RHS;
1235 }
1236
1237 /// Unsigned less or equal comparison
1238 ///
1239 /// Regards both *this and RHS as unsigned quantities and compares them for
1240 /// validity of the less-or-equal relationship.
1241 ///
1242 /// \returns true if *this <= RHS when both are considered unsigned.
1243 bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }
1244
1245 /// Unsigned less or equal comparison
1246 ///
1247 /// Regards both *this as an unsigned quantity and compares it with RHS for
1248 /// the validity of the less-or-equal relationship.
1249 ///
1250 /// \returns true if *this <= RHS when considered unsigned.
1251 bool ule(uint64_t RHS) const { return !ugt(RHS); }
1252
1253 /// Signed less or equal comparison
1254 ///
1255 /// Regards both *this and RHS as signed quantities and compares them for
1256 /// validity of the less-or-equal relationship.
1257 ///
1258 /// \returns true if *this <= RHS when both are considered signed.
1259 bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }
1260
1261 /// Signed less or equal comparison
1262 ///
1263 /// Regards both *this as a signed quantity and compares it with RHS for the
1264 /// validity of the less-or-equal relationship.
1265 ///
1266 /// \returns true if *this <= RHS when considered signed.
1267 bool sle(uint64_t RHS) const { return !sgt(RHS); }
1268
1269 /// Unsigned greater than comparison
1270 ///
1271 /// Regards both *this and RHS as unsigned quantities and compares them for
1272 /// the validity of the greater-than relationship.
1273 ///
1274 /// \returns true if *this > RHS when both are considered unsigned.
1275 bool ugt(const APInt &RHS) const { return !ule(RHS); }
1276
1277 /// Unsigned greater than comparison
1278 ///
1279 /// Regards both *this as an unsigned quantity and compares it with RHS for
1280 /// the validity of the greater-than relationship.
1281 ///
1282 /// \returns true if *this > RHS when considered unsigned.
1283 bool ugt(uint64_t RHS) const {
1284 // Only need to check active bits if not a single word.
1285 return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
1286 }
1287
1288 /// Signed greater than comparison
1289 ///
1290 /// Regards both *this and RHS as signed quantities and compares them for the
1291 /// validity of the greater-than relationship.
1292 ///
1293 /// \returns true if *this > RHS when both are considered signed.
1294 bool sgt(const APInt &RHS) const { return !sle(RHS); }
1295
1296 /// Signed greater than comparison
1297 ///
1298 /// Regards both *this as a signed quantity and compares it with RHS for
1299 /// the validity of the greater-than relationship.
1300 ///
1301 /// \returns true if *this > RHS when considered signed.
1302 bool sgt(int64_t RHS) const {
1303 return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative()
1304 : getSExtValue() > RHS;
1305 }
1306
1307 /// Unsigned greater or equal comparison
1308 ///
1309 /// Regards both *this and RHS as unsigned quantities and compares them for
1310 /// validity of the greater-or-equal relationship.
1311 ///
1312 /// \returns true if *this >= RHS when both are considered unsigned.
1313 bool uge(const APInt &RHS) const { return !ult(RHS); }
1314
1315 /// Unsigned greater or equal comparison
1316 ///
1317 /// Regards both *this as an unsigned quantity and compares it with RHS for
1318 /// the validity of the greater-or-equal relationship.
1319 ///
1320 /// \returns true if *this >= RHS when considered unsigned.
1321 bool uge(uint64_t RHS) const { return !ult(RHS); }
1322
1323 /// Signed greater or equal comparison
1324 ///
1325 /// Regards both *this and RHS as signed quantities and compares them for
1326 /// validity of the greater-or-equal relationship.
1327 ///
1328 /// \returns true if *this >= RHS when both are considered signed.
1329 bool sge(const APInt &RHS) const { return !slt(RHS); }
1330
1331 /// Signed greater or equal comparison
1332 ///
1333 /// Regards both *this as a signed quantity and compares it with RHS for
1334 /// the validity of the greater-or-equal relationship.
1335 ///
1336 /// \returns true if *this >= RHS when considered signed.
1337 bool sge(int64_t RHS) const { return !slt(RHS); }
1338
1339 /// This operation tests if there are any pairs of corresponding bits
1340 /// between this APInt and RHS that are both set.
1341 bool intersects(const APInt &RHS) const {
1342 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same")(static_cast<void> (0));
1343 if (isSingleWord())
1344 return (U.VAL & RHS.U.VAL) != 0;
1345 return intersectsSlowCase(RHS);
1346 }
1347
1348 /// This operation checks that all bits set in this APInt are also set in RHS.
1349 bool isSubsetOf(const APInt &RHS) const {
1350 assert(BitWidth == RHS.BitWidth && "Bit widths must be the same")(static_cast<void> (0));
1351 if (isSingleWord())
1352 return (U.VAL & ~RHS.U.VAL) == 0;
1353 return isSubsetOfSlowCase(RHS);
1354 }
1355
1356 /// @}
1357 /// \name Resizing Operators
1358 /// @{
1359
1360 /// Truncate to new width.
1361 ///
1362 /// Truncate the APInt to a specified width. It is an error to specify a width
1363 /// that is greater than or equal to the current width.
1364 APInt trunc(unsigned width) const;
1365
1366 /// Truncate to new width with unsigned saturation.
1367 ///
1368 /// If the APInt, treated as unsigned integer, can be losslessly truncated to
1369 /// the new bitwidth, then return truncated APInt. Else, return max value.
1370 APInt truncUSat(unsigned width) const;
1371
1372 /// Truncate to new width with signed saturation.
1373 ///
1374 /// If this APInt, treated as signed integer, can be losslessly truncated to
1375 /// the new bitwidth, then return truncated APInt. Else, return either
1376 /// signed min value if the APInt was negative, or signed max value.
1377 APInt truncSSat(unsigned width) const;
1378
1379 /// Sign extend to a new width.
1380 ///
1381 /// This operation sign extends the APInt to a new width. If the high order
1382 /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
1383 /// It is an error to specify a width that is less than or equal to the
1384 /// current width.
1385 APInt sext(unsigned width) const;
1386
1387 /// Zero extend to a new width.
1388 ///
1389 /// This operation zero extends the APInt to a new width. The high order bits
1390 /// are filled with 0 bits. It is an error to specify a width that is less
1391 /// than or equal to the current width.
1392 APInt zext(unsigned width) const;
1393
1394 /// Sign extend or truncate to width
1395 ///
1396 /// Make this APInt have the bit width given by \p width. The value is sign
1397 /// extended, truncated, or left alone to make it that width.
1398 APInt sextOrTrunc(unsigned width) const;
1399
1400 /// Zero extend or truncate to width
1401 ///
1402 /// Make this APInt have the bit width given by \p width. The value is zero
1403 /// extended, truncated, or left alone to make it that width.
1404 APInt zextOrTrunc(unsigned width) const;
1405
1406 /// Truncate to width
1407 ///
1408 /// Make this APInt have the bit width given by \p width. The value is
1409 /// truncated or left alone to make it that width.
1410 APInt truncOrSelf(unsigned width) const;
1411
1412 /// Sign extend or truncate to width
1413 ///
1414 /// Make this APInt have the bit width given by \p width. The value is sign
1415 /// extended, or left alone to make it that width.
1416 APInt sextOrSelf(unsigned width) const;
1417
1418 /// Zero extend or truncate to width
1419 ///
1420 /// Make this APInt have the bit width given by \p width. The value is zero
1421 /// extended, or left alone to make it that width.
1422 APInt zextOrSelf(unsigned width) const;
1423
1424 /// @}
1425 /// \name Bit Manipulation Operators
1426 /// @{
1427
1428 /// Set every bit to 1.
1429 void setAllBits() {
1430 if (isSingleWord())
1431 U.VAL = WORDTYPE_MAX;
1432 else
1433 // Set all the bits in all the words.
1434 memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
1435 // Clear the unused ones
1436 clearUnusedBits();
1437 }
1438
1439 /// Set a given bit to 1.
1440 ///
1441 /// Set the given bit to 1 whose position is given as "bitPosition".
1442 void setBit(unsigned BitPosition) {
1443 assert(BitPosition < BitWidth && "BitPosition out of range")(static_cast<void> (0));
1444 WordType Mask = maskBit(BitPosition);
1445 if (isSingleWord())
1446 U.VAL |= Mask;
1447 else
1448 U.pVal[whichWord(BitPosition)] |= Mask;
1449 }
1450
1451 /// Set the sign bit to 1.
1452 void setSignBit() {
1453 setBit(BitWidth - 1);
1454 }
1455
1456 /// Set a given bit to a given value.
1457 void setBitVal(unsigned BitPosition, bool BitValue) {
1458 if (BitValue)
1459 setBit(BitPosition);
1460 else
1461 clearBit(BitPosition);
1462 }
1463
1464 /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
1465 /// This function handles "wrap" case when \p loBit >= \p hiBit, and calls
1466 /// setBits when \p loBit < \p hiBit.
1467 /// For \p loBit == \p hiBit wrap case, set every bit to 1.
1468 void setBitsWithWrap(unsigned loBit, unsigned hiBit) {
1469 assert(hiBit <= BitWidth && "hiBit out of range")(static_cast<void> (0));
1470 assert(loBit <= BitWidth && "loBit out of range")(static_cast<void> (0));
1471 if (loBit < hiBit) {
1472 setBits(loBit, hiBit);
1473 return;
1474 }
1475 setLowBits(hiBit);
1476 setHighBits(BitWidth - loBit);
1477 }
1478
1479 /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
1480 /// This function handles case when \p loBit <= \p hiBit.
1481 void setBits(unsigned loBit, unsigned hiBit) {
1482 assert(hiBit <= BitWidth && "hiBit out of range")(static_cast<void> (0));
1483 assert(loBit <= BitWidth && "loBit out of range")(static_cast<void> (0));
1484 assert(loBit <= hiBit && "loBit greater than hiBit")(static_cast<void> (0));
1485 if (loBit == hiBit)
1486 return;
1487 if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
1488 uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
1489 mask <<= loBit;
1490 if (isSingleWord())
1491 U.VAL |= mask;
1492 else
1493 U.pVal[0] |= mask;
1494 } else {
1495 setBitsSlowCase(loBit, hiBit);
1496 }
1497 }
1498
1499 /// Set the top bits starting from loBit.
1500 void setBitsFrom(unsigned loBit) {
1501 return setBits(loBit, BitWidth);
1502 }
1503
1504 /// Set the bottom loBits bits.
1505 void setLowBits(unsigned loBits) {
1506 return setBits(0, loBits);
1507 }
1508
1509 /// Set the top hiBits bits.
1510 void setHighBits(unsigned hiBits) {
1511 return setBits(BitWidth - hiBits, BitWidth);
1512 }
1513
1514 /// Set every bit to 0.
1515 void clearAllBits() {
1516 if (isSingleWord())
1517 U.VAL = 0;
1518 else
1519 memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE);
1520 }
1521
1522 /// Set a given bit to 0.
1523 ///
1524 /// Set the given bit to 0 whose position is given as "bitPosition".
1525 void clearBit(unsigned BitPosition) {
1526 assert(BitPosition < BitWidth && "BitPosition out of range")(static_cast<void> (0));
1527 WordType Mask = ~maskBit(BitPosition);
1528 if (isSingleWord())
1529 U.VAL &= Mask;
1530 else
1531 U.pVal[whichWord(BitPosition)] &= Mask;
1532 }
1533
1534 /// Set bottom loBits bits to 0.
1535 void clearLowBits(unsigned loBits) {
1536 assert(loBits <= BitWidth && "More bits than bitwidth")(static_cast<void> (0));
1537 APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits);
1538 *this &= Keep;
1539 }
1540
1541 /// Set the sign bit to 0.
1542 void clearSignBit() {
1543 clearBit(BitWidth - 1);
1544 }
1545
1546 /// Toggle every bit to its opposite value.
1547 void flipAllBits() {
1548 if (isSingleWord()) {
1549 U.VAL ^= WORDTYPE_MAX;
1550 clearUnusedBits();
1551 } else {
1552 flipAllBitsSlowCase();
1553 }
1554 }
1555
1556 /// Toggles a given bit to its opposite value.
1557 ///
1558 /// Toggle a given bit to its opposite value whose position is given
1559 /// as "bitPosition".
1560 void flipBit(unsigned bitPosition);
1561
1562 /// Negate this APInt in place.
1563 void negate() {
1564 flipAllBits();
1565 ++(*this);
1566 }
1567
1568 /// Insert the bits from a smaller APInt starting at bitPosition.
1569 void insertBits(const APInt &SubBits, unsigned bitPosition);
1570 void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits);
1571
1572 /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
1573 APInt extractBits(unsigned numBits, unsigned bitPosition) const;
1574 uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const;
1575
1576 /// @}
1577 /// \name Value Characterization Functions
1578 /// @{
1579
1580 /// Return the number of bits in the APInt.
1581 unsigned getBitWidth() const { return BitWidth; }
1582
1583 /// Get the number of words.
1584 ///
1585 /// Here one word's bitwidth equals to that of uint64_t.
1586 ///
1587 /// \returns the number of words to hold the integer value of this APInt.
1588 unsigned getNumWords() const { return getNumWords(BitWidth); }
1589
1590 /// Get the number of words.
1591 ///
1592 /// *NOTE* Here one word's bitwidth equals to that of uint64_t.
1593 ///
1594 /// \returns the number of words to hold the integer value with a given bit
1595 /// width.
1596 static unsigned getNumWords(unsigned BitWidth) {
1597 return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
1598 }
1599
1600 /// Compute the number of active bits in the value
1601 ///
1602 /// This function returns the number of active bits which is defined as the
1603 /// bit width minus the number of leading zeros. This is used in several
1604 /// computations to see how "wide" the value is.
1605 unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }
1606
1607 /// Compute the number of active words in the value of this APInt.
1608 ///
1609 /// This is used in conjunction with getActiveData to extract the raw value of
1610 /// the APInt.
1611 unsigned getActiveWords() const {
1612 unsigned numActiveBits = getActiveBits();
1613 return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
1614 }
1615
1616 /// Get the minimum bit size for this signed APInt
1617 ///
1618 /// Computes the minimum bit width for this APInt while considering it to be a
1619 /// signed (and probably negative) value. If the value is not negative, this
1620 /// function returns the same value as getActiveBits()+1. Otherwise, it
1621 /// returns the smallest bit width that will retain the negative value. For
1622 /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
1623 /// for -1, this function will always return 1.
1624 unsigned getMinSignedBits() const { return BitWidth - getNumSignBits() + 1; }
1625
1626 /// Get zero extended value
1627 ///
1628 /// This method attempts to return the value of this APInt as a zero extended
1629 /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
1630 /// uint64_t. Otherwise an assertion will result.
1631 uint64_t getZExtValue() const {
1632 if (isSingleWord())
1633 return U.VAL;
1634 assert(getActiveBits() <= 64 && "Too many bits for uint64_t")(static_cast<void> (0));
1635 return U.pVal[0];
1636 }
1637
1638 /// Get sign extended value
1639 ///
1640 /// This method attempts to return the value of this APInt as a sign extended
1641 /// int64_t. The bit width must be <= 64 or the value must fit within an
1642 /// int64_t. Otherwise an assertion will result.
1643 int64_t getSExtValue() const {
1644 if (isSingleWord())
1645 return SignExtend64(U.VAL, BitWidth);
1646 assert(getMinSignedBits() <= 64 && "Too many bits for int64_t")(static_cast<void> (0));
1647 return int64_t(U.pVal[0]);
1648 }
1649
1650 /// Get bits required for string value.
1651 ///
1652 /// This method determines how many bits are required to hold the APInt
1653 /// equivalent of the string given by \p str.
1654 static unsigned getBitsNeeded(StringRef str, uint8_t radix);
1655
1656 /// The APInt version of the countLeadingZeros functions in
1657 /// MathExtras.h.
1658 ///
1659 /// It counts the number of zeros from the most significant bit to the first
1660 /// one bit.
1661 ///
1662 /// \returns BitWidth if the value is zero, otherwise returns the number of
1663 /// zeros from the most significant bit to the first one bits.
1664 unsigned countLeadingZeros() const {
1665 if (isSingleWord()) {
1666 unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
1667 return llvm::countLeadingZeros(U.VAL) - unusedBits;
1668 }
1669 return countLeadingZerosSlowCase();
1670 }
1671
1672 /// Count the number of leading one bits.
1673 ///
1674 /// This function is an APInt version of the countLeadingOnes
1675 /// functions in MathExtras.h. It counts the number of ones from the most
1676 /// significant bit to the first zero bit.
1677 ///
1678 /// \returns 0 if the high order bit is not set, otherwise returns the number
1679 /// of 1 bits from the most significant to the least
1680 unsigned countLeadingOnes() const {
1681 if (isSingleWord())
1682 return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
1683 return countLeadingOnesSlowCase();
1684 }
1685
1686 /// Computes the number of leading bits of this APInt that are equal to its
1687 /// sign bit.
1688 unsigned getNumSignBits() const {
1689 return isNegative() ? countLeadingOnes() : countLeadingZeros();
1690 }
1691
1692 /// Count the number of trailing zero bits.
1693 ///
1694 /// This function is an APInt version of the countTrailingZeros
1695 /// functions in MathExtras.h. It counts the number of zeros from the least
1696 /// significant bit to the first set bit.
1697 ///
1698 /// \returns BitWidth if the value is zero, otherwise returns the number of
1699 /// zeros from the least significant bit to the first one bit.
1700 unsigned countTrailingZeros() const {
1701 if (isSingleWord()) {
1702 unsigned TrailingZeros = llvm::countTrailingZeros(U.VAL);
1703 return (TrailingZeros > BitWidth ? BitWidth : TrailingZeros);
1704 }
1705 return countTrailingZerosSlowCase();
1706 }
1707
1708 /// Count the number of trailing one bits.
1709 ///
1710 /// This function is an APInt version of the countTrailingOnes
1711 /// functions in MathExtras.h. It counts the number of ones from the least
1712 /// significant bit to the first zero bit.
1713 ///
1714 /// \returns BitWidth if the value is all ones, otherwise returns the number
1715 /// of ones from the least significant bit to the first zero bit.
1716 unsigned countTrailingOnes() const {
1717 if (isSingleWord())
1718 return llvm::countTrailingOnes(U.VAL);
1719 return countTrailingOnesSlowCase();
1720 }
1721
1722 /// Count the number of bits set.
1723 ///
1724 /// This function is an APInt version of the countPopulation functions
1725 /// in MathExtras.h. It counts the number of 1 bits in the APInt value.
1726 ///
1727 /// \returns 0 if the value is zero, otherwise returns the number of set bits.
1728 unsigned countPopulation() const {
1729 if (isSingleWord())
1730 return llvm::countPopulation(U.VAL);
1731 return countPopulationSlowCase();
1732 }
1733
1734 /// @}
1735 /// \name Conversion Functions
1736 /// @{
1737 void print(raw_ostream &OS, bool isSigned) const;
1738
1739 /// Converts an APInt to a string and append it to Str. Str is commonly a
1740 /// SmallString.
1741 void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
1742 bool formatAsCLiteral = false) const;
1743
1744 /// Considers the APInt to be unsigned and converts it into a string in the
1745 /// radix given. The radix can be 2, 8, 10 16, or 36.
1746 void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
1747 toString(Str, Radix, false, false);
1748 }
1749
1750 /// Considers the APInt to be signed and converts it into a string in the
1751 /// radix given. The radix can be 2, 8, 10, 16, or 36.
1752 void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
1753 toString(Str, Radix, true, false);
1754 }
1755
1756 /// \returns a byte-swapped representation of this APInt Value.
1757 APInt byteSwap() const;
1758
1759 /// \returns the value with the bit representation reversed of this APInt
1760 /// Value.
1761 APInt reverseBits() const;
1762
1763 /// Converts this APInt to a double value.
1764 double roundToDouble(bool isSigned) const;
1765
1766 /// Converts this unsigned APInt to a double value.
1767 double roundToDouble() const { return roundToDouble(false); }
1768
1769 /// Converts this signed APInt to a double value.
1770 double signedRoundToDouble() const { return roundToDouble(true); }
1771
1772 /// Converts APInt bits to a double
1773 ///
1774 /// The conversion does not do a translation from integer to double, it just
1775 /// re-interprets the bits as a double. Note that it is valid to do this on
1776 /// any bit width. Exactly 64 bits will be translated.
1777 double bitsToDouble() const {
1778 return BitsToDouble(getWord(0));
1779 }
1780
1781 /// Converts APInt bits to a float
1782 ///
1783 /// The conversion does not do a translation from integer to float, it just
1784 /// re-interprets the bits as a float. Note that it is valid to do this on
1785 /// any bit width. Exactly 32 bits will be translated.
1786 float bitsToFloat() const {
1787 return BitsToFloat(static_cast<uint32_t>(getWord(0)));
1788 }
1789
1790 /// Converts a double to APInt bits.
1791 ///
1792 /// The conversion does not do a translation from double to integer, it just
1793 /// re-interprets the bits of the double.
1794 static APInt doubleToBits(double V) {
1795 return APInt(sizeof(double) * CHAR_BIT8, DoubleToBits(V));
1796 }
1797
1798 /// Converts a float to APInt bits.
1799 ///
1800 /// The conversion does not do a translation from float to integer, it just
1801 /// re-interprets the bits of the float.
1802 static APInt floatToBits(float V) {
1803 return APInt(sizeof(float) * CHAR_BIT8, FloatToBits(V));
1804 }
1805
1806 /// @}
1807 /// \name Mathematics Operations
1808 /// @{
1809
1810 /// \returns the floor log base 2 of this APInt.
1811 unsigned logBase2() const { return getActiveBits() - 1; }
1812
1813 /// \returns the ceil log base 2 of this APInt.
1814 unsigned ceilLogBase2() const {
1815 APInt temp(*this);
1816 --temp;
1817 return temp.getActiveBits();
1818 }
1819
1820 /// \returns the nearest log base 2 of this APInt. Ties round up.
1821 ///
1822 /// NOTE: When we have a BitWidth of 1, we define:
1823 ///
1824 /// log2(0) = UINT32_MAX
1825 /// log2(1) = 0
1826 ///
1827 /// to get around any mathematical concerns resulting from
1828 /// referencing 2 in a space where 2 does no exist.
1829 unsigned nearestLogBase2() const {
1830 // Special case when we have a bitwidth of 1. If VAL is 1, then we
1831 // get 0. If VAL is 0, we get WORDTYPE_MAX which gets truncated to
1832 // UINT32_MAX.
1833 if (BitWidth == 1)
1834 return U.VAL - 1;
1835
1836 // Handle the zero case.
1837 if (isNullValue())
1838 return UINT32_MAX(4294967295U);
1839
1840 // The non-zero case is handled by computing:
1841 //
1842 // nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1].
1843 //
1844 // where x[i] is referring to the value of the ith bit of x.
1845 unsigned lg = logBase2();
1846 return lg + unsigned((*this)[lg - 1]);
1847 }
1848
1849 /// \returns the log base 2 of this APInt if its an exact power of two, -1
1850 /// otherwise
1851 int32_t exactLogBase2() const {
1852 if (!isPowerOf2())
1853 return -1;
1854 return logBase2();
1855 }
1856
1857 /// Compute the square root
1858 APInt sqrt() const;
1859
1860 /// Get the absolute value;
1861 ///
1862 /// If *this is < 0 then return -(*this), otherwise *this;
1863 APInt abs() const {
1864 if (isNegative())
1865 return -(*this);
1866 return *this;
1867 }
1868
1869 /// \returns the multiplicative inverse for a given modulo.
1870 APInt multiplicativeInverse(const APInt &modulo) const;
1871
1872 /// @}
1873 /// \name Support for division by constant
1874 /// @{
1875
1876 /// Calculate the magic number for signed division by a constant.
1877 struct ms;
1878 ms magic() const;
1879
1880 /// Calculate the magic number for unsigned division by a constant.
1881 struct mu;
1882 mu magicu(unsigned LeadingZeros = 0) const;
1883
1884 /// @}
1885 /// \name Building-block Operations for APInt and APFloat
1886 /// @{
1887
1888 // These building block operations operate on a representation of arbitrary
1889 // precision, two's-complement, bignum integer values. They should be
1890 // sufficient to implement APInt and APFloat bignum requirements. Inputs are
1891 // generally a pointer to the base of an array of integer parts, representing
1892 // an unsigned bignum, and a count of how many parts there are.
1893
1894 /// Sets the least significant part of a bignum to the input value, and zeroes
1895 /// out higher parts.
1896 static void tcSet(WordType *, WordType, unsigned);
1897
1898 /// Assign one bignum to another.
1899 static void tcAssign(WordType *, const WordType *, unsigned);
1900
1901 /// Returns true if a bignum is zero, false otherwise.
1902 static bool tcIsZero(const WordType *, unsigned);
1903
1904 /// Extract the given bit of a bignum; returns 0 or 1. Zero-based.
1905 static int tcExtractBit(const WordType *, unsigned bit);
1906
1907 /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
1908 /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
1909 /// significant bit of DST. All high bits above srcBITS in DST are
1910 /// zero-filled.
1911 static void tcExtract(WordType *, unsigned dstCount,
1912 const WordType *, unsigned srcBits,
1913 unsigned srcLSB);
1914
1915 /// Set the given bit of a bignum. Zero-based.
1916 static void tcSetBit(WordType *, unsigned bit);
1917
1918 /// Clear the given bit of a bignum. Zero-based.
1919 static void tcClearBit(WordType *, unsigned bit);
1920
1921 /// Returns the bit number of the least or most significant set bit of a
1922 /// number. If the input number has no bits set -1U is returned.
1923 static unsigned tcLSB(const WordType *, unsigned n);
1924 static unsigned tcMSB(const WordType *parts, unsigned n);
1925
1926 /// Negate a bignum in-place.
1927 static void tcNegate(WordType *, unsigned);
1928
1929 /// DST += RHS + CARRY where CARRY is zero or one. Returns the carry flag.
1930 static WordType tcAdd(WordType *, const WordType *,
1931 WordType carry, unsigned);
1932 /// DST += RHS. Returns the carry flag.
1933 static WordType tcAddPart(WordType *, WordType, unsigned);
1934
1935 /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
1936 static WordType tcSubtract(WordType *, const WordType *,
1937 WordType carry, unsigned);
1938 /// DST -= RHS. Returns the carry flag.
1939 static WordType tcSubtractPart(WordType *, WordType, unsigned);
1940
1941 /// DST += SRC * MULTIPLIER + PART if add is true
1942 /// DST = SRC * MULTIPLIER + PART if add is false
1943 ///
1944 /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must
1945 /// start at the same point, i.e. DST == SRC.
1946 ///
1947 /// If DSTPARTS == SRC_PARTS + 1 no overflow occurs and zero is returned.
1948 /// Otherwise DST is filled with the least significant DSTPARTS parts of the
1949 /// result, and if all of the omitted higher parts were zero return zero,
1950 /// otherwise overflow occurred and return one.
1951 static int tcMultiplyPart(WordType *dst, const WordType *src,
1952 WordType multiplier, WordType carry,
1953 unsigned srcParts, unsigned dstParts,
1954 bool add);
1955
1956 /// DST = LHS * RHS, where DST has the same width as the operands and is
1957 /// filled with the least significant parts of the result. Returns one if
1958 /// overflow occurred, otherwise zero. DST must be disjoint from both
1959 /// operands.
1960 static int tcMultiply(WordType *, const WordType *, const WordType *,
1961 unsigned);
1962
1963 /// DST = LHS * RHS, where DST has width the sum of the widths of the
1964 /// operands. No overflow occurs. DST must be disjoint from both operands.
1965 static void tcFullMultiply(WordType *, const WordType *,
1966 const WordType *, unsigned, unsigned);
1967
1968 /// If RHS is zero LHS and REMAINDER are left unchanged, return one.
1969 /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
1970 /// REMAINDER to the remainder, return zero. i.e.
1971 ///
1972 /// OLD_LHS = RHS * LHS + REMAINDER
1973 ///
1974 /// SCRATCH is a bignum of the same size as the operands and result for use by
1975 /// the routine; its contents need not be initialized and are destroyed. LHS,
1976 /// REMAINDER and SCRATCH must be distinct.
1977 static int tcDivide(WordType *lhs, const WordType *rhs,
1978 WordType *remainder, WordType *scratch,
1979 unsigned parts);
1980
1981 /// Shift a bignum left Count bits. Shifted in bits are zero. There are no
1982 /// restrictions on Count.
1983 static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);
1984
1985 /// Shift a bignum right Count bits. Shifted in bits are zero. There are no
1986 /// restrictions on Count.
1987 static void tcShiftRight(WordType *, unsigned Words, unsigned Count);
1988
1989 /// The obvious AND, OR and XOR and complement operations.
1990 static void tcAnd(WordType *, const WordType *, unsigned);
1991 static void tcOr(WordType *, const WordType *, unsigned);
1992 static void tcXor(WordType *, const WordType *, unsigned);
1993 static void tcComplement(WordType *, unsigned);
1994
1995 /// Comparison (unsigned) of two bignums.
1996 static int tcCompare(const WordType *, const WordType *, unsigned);
1997
1998 /// Increment a bignum in-place. Return the carry flag.
1999 static WordType tcIncrement(WordType *dst, unsigned parts) {
2000 return tcAddPart(dst, 1, parts);
2001 }
2002
2003 /// Decrement a bignum in-place. Return the borrow flag.
2004 static WordType tcDecrement(WordType *dst, unsigned parts) {
2005 return tcSubtractPart(dst, 1, parts);
2006 }
2007
2008 /// Set the least significant BITS and clear the rest.
2009 static void tcSetLeastSignificantBits(WordType *, unsigned, unsigned bits);
2010
2011 /// debug method
2012 void dump() const;
2013
2014 /// @}
2015};
2016
2017/// Magic data for optimising signed division by a constant.
2018struct APInt::ms {
2019 APInt m; ///< magic number
2020 unsigned s; ///< shift amount
2021};
2022
2023/// Magic data for optimising unsigned division by a constant.
2024struct APInt::mu {
2025 APInt m; ///< magic number
2026 bool a; ///< add indicator
2027 unsigned s; ///< shift amount
2028};
2029
2030inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }
2031
2032inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }
2033
2034/// Unary bitwise complement operator.
2035///
2036/// \returns an APInt that is the bitwise complement of \p v.
2037inline APInt operator~(APInt v) {
2038 v.flipAllBits();
2039 return v;
2040}
2041
2042inline APInt operator&(APInt a, const APInt &b) {
2043 a &= b;
2044 return a;
2045}
2046
2047inline APInt operator&(const APInt &a, APInt &&b) {
2048 b &= a;
2049 return std::move(b);
2050}
2051
2052inline APInt operator&(APInt a, uint64_t RHS) {
2053 a &= RHS;
2054 return a;
2055}
2056
2057inline APInt operator&(uint64_t LHS, APInt b) {
2058 b &= LHS;
2059 return b;
2060}
2061
2062inline APInt operator|(APInt a, const APInt &b) {
2063 a |= b;
2064 return a;
2065}
2066
2067inline APInt operator|(const APInt &a, APInt &&b) {
2068 b |= a;
2069 return std::move(b);
2070}
2071
2072inline APInt operator|(APInt a, uint64_t RHS) {
2073 a |= RHS;
2074 return a;
2075}
2076
2077inline APInt operator|(uint64_t LHS, APInt b) {
2078 b |= LHS;
2079 return b;
2080}
2081
2082inline APInt operator^(APInt a, const APInt &b) {
2083 a ^= b;
2084 return a;
2085}
2086
2087inline APInt operator^(const APInt &a, APInt &&b) {
2088 b ^= a;
2089 return std::move(b);
2090}
2091
2092inline APInt operator^(APInt a, uint64_t RHS) {
2093 a ^= RHS;
2094 return a;
2095}
2096
2097inline APInt operator^(uint64_t LHS, APInt b) {
2098 b ^= LHS;
2099 return b;
2100}
2101
2102inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
2103 I.print(OS, true);
2104 return OS;
2105}
2106
2107inline APInt operator-(APInt v) {
2108 v.negate();
2109 return v;
2110}
2111
2112inline APInt operator+(APInt a, const APInt &b) {
2113 a += b;
2114 return a;
2115}
2116
2117inline APInt operator+(const APInt &a, APInt &&b) {
2118 b += a;
2119 return std::move(b);
2120}
2121
2122inline APInt operator+(APInt a, uint64_t RHS) {
2123 a += RHS;
2124 return a;
2125}
2126
2127inline APInt operator+(uint64_t LHS, APInt b) {
2128 b += LHS;
2129 return b;
2130}
2131
2132inline APInt operator-(APInt a, const APInt &b) {
2133 a -= b;
2134 return a;
2135}
2136
2137inline APInt operator-(const APInt &a, APInt &&b) {
2138 b.negate();
2139 b += a;
2140 return std::move(b);
2141}
2142
2143inline APInt operator-(APInt a, uint64_t RHS) {
2144 a -= RHS;
2145 return a;
2146}
2147
2148inline APInt operator-(uint64_t LHS, APInt b) {
2149 b.negate();
2150 b += LHS;
2151 return b;
2152}
2153
2154inline APInt operator*(APInt a, uint64_t RHS) {
2155 a *= RHS;
2156 return a;
2157}
2158
2159inline APInt operator*(uint64_t LHS, APInt b) {
2160 b *= LHS;
2161 return b;
2162}
2163
2164
2165namespace APIntOps {
2166
2167/// Determine the smaller of two APInts considered to be signed.
2168inline const APInt &smin(const APInt &A, const APInt &B) {
2169 return A.slt(B) ? A : B;
2170}
2171
2172/// Determine the larger of two APInts considered to be signed.
2173inline const APInt &smax(const APInt &A, const APInt &B) {
2174 return A.sgt(B) ? A : B;
2175}
2176
2177/// Determine the smaller of two APInts considered to be unsigned.
2178inline const APInt &umin(const APInt &A, const APInt &B) {
2179 return A.ult(B) ? A : B;
2180}
2181
2182/// Determine the larger of two APInts considered to be unsigned.
2183inline const APInt &umax(const APInt &A, const APInt &B) {
2184 return A.ugt(B) ? A : B;
2185}
2186
2187/// Compute GCD of two unsigned APInt values.
2188///
2189/// This function returns the greatest common divisor of the two APInt values
2190/// using Stein's algorithm.
2191///
2192/// \returns the greatest common divisor of A and B.
2193APInt GreatestCommonDivisor(APInt A, APInt B);
2194
2195/// Converts the given APInt to a double value.
2196///
2197/// Treats the APInt as an unsigned value for conversion purposes.
2198inline double RoundAPIntToDouble(const APInt &APIVal) {
2199 return APIVal.roundToDouble();
2200}
2201
2202/// Converts the given APInt to a double value.
2203///
2204/// Treats the APInt as a signed value for conversion purposes.
2205inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
2206 return APIVal.signedRoundToDouble();
2207}
2208
2209/// Converts the given APInt to a float value.
2210inline float RoundAPIntToFloat(const APInt &APIVal) {
2211 return float(RoundAPIntToDouble(APIVal));
2212}
2213
2214/// Converts the given APInt to a float value.
2215///
2216/// Treats the APInt as a signed value for conversion purposes.
2217inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
2218 return float(APIVal.signedRoundToDouble());
2219}
2220
2221/// Converts the given double value into a APInt.
2222///
2223/// This function convert a double value to an APInt value.
2224APInt RoundDoubleToAPInt(double Double, unsigned width);
2225
2226/// Converts a float value into a APInt.
2227///
2228/// Converts a float value into an APInt value.
2229inline APInt RoundFloatToAPInt(float Float, unsigned width) {
2230 return RoundDoubleToAPInt(double(Float), width);
2231}
2232
2233/// Return A unsign-divided by B, rounded by the given rounding mode.
2234APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
2235
2236/// Return A sign-divided by B, rounded by the given rounding mode.
2237APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
2238
2239/// Let q(n) = An^2 + Bn + C, and BW = bit width of the value range
2240/// (e.g. 32 for i32).
2241/// This function finds the smallest number n, such that
2242/// (a) n >= 0 and q(n) = 0, or
2243/// (b) n >= 1 and q(n-1) and q(n), when evaluated in the set of all
2244/// integers, belong to two different intervals [Rk, Rk+R),
2245/// where R = 2^BW, and k is an integer.
2246/// The idea here is to find when q(n) "overflows" 2^BW, while at the
2247/// same time "allowing" subtraction. In unsigned modulo arithmetic a
2248/// subtraction (treated as addition of negated numbers) would always
2249/// count as an overflow, but here we want to allow values to decrease
2250/// and increase as long as they are within the same interval.
2251/// Specifically, adding of two negative numbers should not cause an
2252/// overflow (as long as the magnitude does not exceed the bit width).
2253/// On the other hand, given a positive number, adding a negative
2254/// number to it can give a negative result, which would cause the
2255/// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is
2256/// treated as a special case of an overflow.
2257///
2258/// This function returns None if after finding k that minimizes the
2259/// positive solution to q(n) = kR, both solutions are contained between
2260/// two consecutive integers.
2261///
2262/// There are cases where q(n) > T, and q(n+1) < T (assuming evaluation
2263/// in arithmetic modulo 2^BW, and treating the values as signed) by the
2264/// virtue of *signed* overflow. This function will *not* find such an n,
2265/// however it may find a value of n satisfying the inequalities due to
2266/// an *unsigned* overflow (if the values are treated as unsigned).
2267/// To find a solution for a signed overflow, treat it as a problem of
 2268/// finding an unsigned overflow with a range width of BW-1.
2269///
2270/// The returned value may have a different bit width from the input
2271/// coefficients.
2272Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
2273 unsigned RangeWidth);
2274
2275/// Compare two values, and if they are different, return the position of the
2276/// most significant bit that is different in the values.
2277Optional<unsigned> GetMostSignificantDifferentBit(const APInt &A,
2278 const APInt &B);
2279
2280} // End of APIntOps namespace
2281
2282// See friend declaration above. This additional declaration is required in
2283// order to compile LLVM with IBM xlC compiler.
2284hash_code hash_value(const APInt &Arg);
2285
2286/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
2287/// with the integer held in IntVal.
2288void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);
2289
2290/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
2291/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
2292void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes);
2293
/// Provide DenseMapInfo for APInt.
///
/// Allows APInt to be used directly as a DenseMap/DenseSet key.  Both
/// sentinel keys are built with a raw constructor taking (nullptr, 0) —
/// presumably a zero-bit-width value that no ordinary key can equal — and
/// are distinguished from each other by the raw word written into the
/// private union (0 = empty, 1 = tombstone).  NOTE(review): this touches
/// APInt's private representation (V.U.VAL), so APInt must grant this
/// specialization friend access — confirm against the APInt declaration.
template <> struct DenseMapInfo<APInt> {
  static inline APInt getEmptyKey() {
    // Sentinel for an empty bucket: raw payload 0.
    APInt V(nullptr, 0);
    V.U.VAL = 0;
    return V;
  }

  static inline APInt getTombstoneKey() {
    // Sentinel for a deleted bucket: raw payload 1.
    APInt V(nullptr, 0);
    V.U.VAL = 1;
    return V;
  }

  // Hash of a key; defined out-of-line (not visible in this header chunk).
  static unsigned getHashValue(const APInt &Key);

  static bool isEqual(const APInt &LHS, const APInt &RHS) {
    // Compare widths first so values of mismatched widths are never passed
    // to APInt's operator==.
    return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
  }
};
2314
2315} // namespace llvm
2316
2317#endif