File: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Warning: line 1363, column 26: Called C++ object pointer is null
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool>
    EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Use MemorySSA-backed MemCpyOpt."));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
STATISTIC(NumCallSlot, "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range that describes the span that this range
  // covers. The range is closed at the start and open at the end:
  // [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found 4 or more stores to merge, or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know either that I == E, in which case we didn't find anything to
  // merge with, or that Start <= I->End. If End < I->Start or I == E, then we
  // need to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator and postdominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (!EnableMemorySSA)
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    if (EnableMemorySSA)
      AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
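  // A nounwind function cannot unwind at all, and an alloca is not visible to
  // the caller if we do unwind, so only the remaining cases need a scan for
  // throwing instructions.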
  if (!Start->getFunction()->doesNotThrow() &&
      !isa<AllocaInst>(getUnderlyingObject(V))) {
    for (const Instruction &I :
         make_range(Start->getIterator(), End->getIterator())) {
      if (I.mayThrow())
        return true;
    }
  }
  return false;
}

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
  if (MD)
    MD->removeInstruction(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both boundaries.
// Start and End must be in the same block.
static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
                                       Loc)))
      return true;
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
                           const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  // TODO: Only walk until we hit Start.
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc);
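  // If the clobbering access dominates Start, the last write to Loc before End
  // already happens at or before Start, so nothing writes Loc strictly in
  // between.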
  return !MSSA->dominates(Clobber, Start);
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
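/// For illustration only (hypothetical IR, not taken from a real test): four
/// adjacent "store i8 0" instructions to %p, %p+1, %p+2 and %p+3 can be
/// rewritten as a single call to @llvm.memset over 4 bytes starting at %p,
/// subject to the MemsetRange::isProfitableToUseMemset heuristic above.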
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splattable. Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion point
  // for the new memset. This will become the defining access of the inserted
  // memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // stores integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value is of the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block. This ensures that the memset is dominated
  // by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
                                                   : Range.TheStores) dbgs()
                                              << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything that
// may alias with these.
// The method returns true if it was successful.
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

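  // Walk backwards from just above SI toward P, deciding for each instruction
  // whether it has to be lifted along with the store.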
  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));
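    // Note: passing None as the location here asks AA whether C may read or
    // write *any* memory location at all.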

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P will be determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MSSAU) {
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
      MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
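      // The accesses for the lifted instructions will be chained in after the
      // access immediately preceding P's access, i.e. still before P itself.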
    } else {
      const Instruction *ConstP = P;
      for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                             ++LI->getReverseIterator())) {
        if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
          MemInsertPoint = MA;
          break;
        }
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    if (MSSAU) {
      assert(MemInsertPoint && "Must have found insert point");
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
        MSSAU->moveAfter(MA, MemInsertPoint);
        MemInsertPoint = MA;
      }
    }
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the backend how to propagate the !nontemporal
  // metadata to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

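      // Case 1: the store writes back an aggregate that was just loaded. The
      // pair can become a memcpy (or a memmove if the two locations may
      // overlap); handled below.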
      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        // TODO: Can use MSSA for this.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          }

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      CallInst *C = nullptr;
      if (EnableMemorySSA) {
        if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
                MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
          // The load must post-dominate the call. Limit to the same block for
          // now. TODO: Support non-local call-slot optimization?
          if (LoadClobber->getBlock() == SI->getParent())
            C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
        }
      } else {
        MemDepResult ldep = MD->getDependency(LI);
        if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
          C = dyn_cast<CallInst>(ldep.getInst());
      }

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        if (EnableMemorySSA) {
          if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
                              MSSA->getMemoryAccess(SI)))
            C = nullptr;
        } else {
          for (BasicBlock::iterator I = --SI->getIterator(),
                                    E = C->getIterator();
               I != E; --I) {
            if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
              C = nullptr;
              break;
            }
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
      }

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;
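  // The copy must cover the entire alloca: the call may write any of srcSize
  // bytes, and after the transform those writes land directly in dest, so a
  // shorter copy would not have forwarded all of them.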

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      append_range(srcUseList, U->users());
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
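  // callCapturesBefore can give a more precise answer when it can prove that
  // cpyDest is not captured before the call.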
976 | if (isModOrRefSet(MR)) | ||||||||
977 | return false; | ||||||||
978 | |||||||||
979 | // We can't create address space casts here because we don't know if they're | ||||||||
980 | // safe for the target. | ||||||||
981 | if (cpySrc->getType()->getPointerAddressSpace() != | ||||||||
982 | cpyDest->getType()->getPointerAddressSpace()) | ||||||||
983 | return false; | ||||||||
984 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||||||
985 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc && | ||||||||
986 | cpySrc->getType()->getPointerAddressSpace() != | ||||||||
987 | C->getArgOperand(ArgI)->getType()->getPointerAddressSpace()) | ||||||||
988 | return false; | ||||||||
989 | |||||||||
990 | // All the checks have passed, so do the transformation. | ||||||||
991 | bool changedArgument = false; | ||||||||
992 | for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI) | ||||||||
993 | if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) { | ||||||||
994 | Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest | ||||||||
995 | : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), | ||||||||
996 | cpyDest->getName(), C); | ||||||||
997 | changedArgument = true; | ||||||||
998 | if (C->getArgOperand(ArgI)->getType() == Dest->getType()) | ||||||||
999 | C->setArgOperand(ArgI, Dest); | ||||||||
1000 | else | ||||||||
1001 | C->setArgOperand(ArgI, CastInst::CreatePointerCast( | ||||||||
1002 | Dest, C->getArgOperand(ArgI)->getType(), | ||||||||
1003 | Dest->getName(), C)); | ||||||||
1004 | } | ||||||||
1005 | |||||||||
1006 | if (!changedArgument) | ||||||||
1007 | return false; | ||||||||
1008 | |||||||||
1009 | // If the destination wasn't sufficiently aligned then increase its alignment. | ||||||||
1010 | if (!isDestSufficientlyAligned) { | ||||||||
1011 | assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!")((isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!" ) ? static_cast<void> (0) : __assert_fail ("isa<AllocaInst>(cpyDest) && \"Can only increase alloca alignment!\"" , "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp" , 1011, __PRETTY_FUNCTION__)); | ||||||||
1012 | cast<AllocaInst>(cpyDest)->setAlignment(srcAlign); | ||||||||
1013 | } | ||||||||
1014 | |||||||||
1015 | // Drop any cached information about the call, because we may have changed | ||||||||
1016 | // its dependence information by changing its parameter. | ||||||||
1017 | if (MD) | ||||||||
1018 | MD->removeInstruction(C); | ||||||||
1019 | |||||||||
1020 | // Update AA metadata | ||||||||
1021 | // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be | ||||||||
1022 | // handled here, but combineMetadata doesn't support them yet | ||||||||
1023 | unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, | ||||||||
1024 | LLVMContext::MD_noalias, | ||||||||
1025 | LLVMContext::MD_invariant_group, | ||||||||
1026 | LLVMContext::MD_access_group}; | ||||||||
1027 | combineMetadata(C, cpyLoad, KnownIDs, true); | ||||||||
1028 | |||||||||
1029 | ++NumCallSlot; | ||||||||
1030 | return true; | ||||||||
1031 | } | ||||||||
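
A source-level sketch of the pattern this call slot optimization targets; `Big` and `compute` are hypothetical names, and the pass itself works on LLVM IR rather than on C++:

    #include <cstring>

    struct Big { char Bytes[64]; };
    void compute(Big *Out); // hypothetical callee that fully writes *Out

    void before(Big *Dest) {
      Big Tmp;
      compute(&Tmp);                        // call writes a temporary...
      std::memcpy(Dest, &Tmp, sizeof(Big)); // ...which is then copied to Dest
    }

    void after(Big *Dest) {
      compute(Dest); // the call writes Dest directly; the copy is gone
    }

The dominance, size, alignment, and aliasing checks above exist to establish that writing Dest at the call site is observationally equivalent to writing the temporary and copying it afterwards.
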
1032 |
1033 | /// We've found that the (upward scanning) memory dependence of memcpy 'M' is
1034 | /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
1035 | bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
1036 |                                                   MemCpyInst *MDep) {
1037 |   // We can only transform memcpy's where the dest of one is the source of the
1038 |   // other.
1039 |   if (M->getSource() != MDep->getDest() || MDep->isVolatile())
1040 |     return false;
1041 |
1042 |   // If dep instruction is reading from our current input, then it is a noop
1043 |   // transfer and substituting the input won't change this instruction. Just
1044 |   // ignore the input and let someone else zap MDep. This handles cases like:
1045 |   //    memcpy(a <- a)
1046 |   //    memcpy(b <- a)
1047 |   if (M->getSource() == MDep->getSource())
1048 |     return false;
1049 |
1050 |   // Second, the length of the memcpy's must be the same, or the preceding one
1051 |   // must be larger than the following one.
1052 |   ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
1053 |   ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
1054 |   if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
1055 |     return false;
1056 |
1057 |   // Verify that the copied-from memory doesn't change in between the two
1058 |   // transfers. For example, in:
1059 |   //    memcpy(a <- b)
1060 |   //    *b = 42;
1061 |   //    memcpy(c <- a)
1062 |   // It would be invalid to transform the second memcpy into memcpy(c <- b).
1063 |   //
1064 |   // TODO: If the code between M and MDep is transparent to the destination "c",
1065 |   // then we could still perform the xform by moving M up to the first memcpy.
1066 |   if (EnableMemorySSA) {
1067 |     // TODO: It would be sufficient to check the MDep source up to the memcpy
1068 |     // size of M, rather than MDep.
1069 |     if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
1070 |                        MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
1071 |       return false;
1072 |   } else {
1073 |     // NOTE: This is conservative, it will stop on any read from the source loc,
1074 |     // not just the defining memcpy.
1075 |     MemDepResult SourceDep =
1076 |         MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
1077 |                                      M->getIterator(), M->getParent());
1078 |     if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1079 |       return false;
1080 |   }
1081 |
1082 |   // If the dest of the second might alias the source of the first, then the
1083 |   // source and dest might overlap. We still want to eliminate the intermediate
1084 |   // value, but we have to generate a memmove instead of memcpy.
1085 |   bool UseMemMove = false;
1086 |   if (!AA->isNoAlias(MemoryLocation::getForDest(M),
1087 |                      MemoryLocation::getForSource(MDep)))
1088 |     UseMemMove = true;
1089 |
1090 |   // If all checks passed, then we can transform M.
1091 |   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
1092 |                     << *MDep << '\n' << *M << '\n');
1093 |
1094 |   // TODO: Is this worth it if we're creating a less aligned memcpy? For
1095 |   // example we could be moving from movaps -> movq on x86.
1096 |   IRBuilder<> Builder(M);
1097 |   Instruction *NewM;
1098 |   if (UseMemMove)
1099 |     NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
1100 |                                  MDep->getRawSource(), MDep->getSourceAlign(),
1101 |                                  M->getLength(), M->isVolatile());
1102 |   else
1103 |     NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
1104 |                                 MDep->getRawSource(), MDep->getSourceAlign(),
1105 |                                 M->getLength(), M->isVolatile());
1106 |
1107 |   if (MSSAU) {
1108 |     assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
1109 |     auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
1110 |     auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1111 |     MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1112 |   }
1113 |
1114 |   // Remove the instruction we're replacing.
1115 |   eraseInstruction(M);
1116 |   ++NumMemCpyInstr;
1117 |   return true;
1118 | }
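
A minimal sketch, in C++ terms, of the forwarding this function performs (buffer names are hypothetical; the constant lengths satisfy the MDepLen >= MLen check above):

    #include <cstring>

    void forward(char *a, char *b, char *c) {
      std::memcpy(b, a, 64); // MDep: b <- a
      std::memcpy(c, b, 64); // M:    c <- b
      // The second copy is rewritten to read the original source,
      // std::memcpy(c, a, 64), leaving the first copy dead for DSE.
      // If c might alias a, a memmove is emitted instead of a memcpy.
    }
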
1119 |
1120 | /// We've found that the (upward scanning) memory dependence of \p MemCpy is
1121 | /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
1122 | /// weren't copied over by \p MemCpy.
1123 | ///
1124 | /// In other words, transform:
1125 | /// \code
1126 | ///   memset(dst, c, dst_size);
1127 | ///   memcpy(dst, src, src_size);
1128 | /// \endcode
1129 | /// into:
1130 | /// \code
1131 | ///   memcpy(dst, src, src_size);
1132 | ///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1133 | /// \endcode
1134 | bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1135 |                                                   MemSetInst *MemSet) {
1136 |   // We can only transform memset/memcpy with the same destination.
1137 |   if (MemSet->getDest() != MemCpy->getDest())
1138 |     return false;
1139 |
1140 |   // Check that src and dst of the memcpy aren't the same. While memcpy
1141 |   // operands cannot partially overlap, exact equality is allowed.
1142 |   if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(),
1143 |                                     LocationSize::precise(1)),
1144 |                      MemoryLocation(MemCpy->getDest(),
1145 |                                     LocationSize::precise(1))))
1146 |     return false;
1147 |
1148 |   if (EnableMemorySSA) {
1149 |     // We know that dst up to src_size is not written. We now need to make sure
1150 |     // that dst up to dst_size is not accessed. (If we did not move the memset,
1151 |     // checking for reads would be sufficient.)
1152 |     if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
1153 |                         MSSA->getMemoryAccess(MemSet),
1154 |                         MSSA->getMemoryAccess(MemCpy))) {
1155 |       return false;
1156 |     }
1157 |   } else {
1158 |     // We have already checked that dst up to src_size is not accessed. We
1159 |     // need to make sure that there are no accesses up to dst_size either.
1160 |     MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
1161 |         MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(),
1162 |         MemCpy->getParent());
1163 |     if (DstDepInfo.getInst() != MemSet)
1164 |       return false;
1165 |   }
1166 |
1167 |   // Use the same i8* dest as the memcpy, killing the memset dest if different.
1168 |   Value *Dest = MemCpy->getRawDest();
1169 |   Value *DestSize = MemSet->getLength();
1170 |   Value *SrcSize = MemCpy->getLength();
1171 |
1172 |   if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
1173 |     return false;
1174 |
1175 |   // By default, create an unaligned memset.
1176 |   unsigned Align = 1;
1177 |   // If Dest is aligned, and SrcSize is constant, use the known alignment of
1178 |   // Dest + SrcSize, i.e. the start of the trailing memset.
1179 |   const unsigned DestAlign =
1180 |       std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
1181 |   if (DestAlign > 1)
1182 |     if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1183 |       Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
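      // Worked example of the line above: with DestAlign = 8 and a constant
      // SrcSize of 20, the trailing memset starts at Dest + 20, and
      // MinAlign(20, 8) = 4, the largest power of two dividing both values.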
1184 |
1185 |   IRBuilder<> Builder(MemCpy);
1186 |
1187 |   // If the sizes have different types, zext the smaller one.
1188 |   if (DestSize->getType() != SrcSize->getType()) {
1189 |     if (DestSize->getType()->getIntegerBitWidth() >
1190 |         SrcSize->getType()->getIntegerBitWidth())
1191 |       SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1192 |     else
1193 |       DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
1194 |   }
1195 |
1196 |   Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1197 |   Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1198 |   Value *MemsetLen = Builder.CreateSelect(
1199 |       Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
1200 |   Instruction *NewMemSet = Builder.CreateMemSet(
1201 |       Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
1202 |                         SrcSize),
1203 |       MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));
1204 |
1205 |   if (MSSAU) {
1206 |     assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
1207 |            "MemCpy must be a MemoryDef");
1208 |     // The new memset is inserted after the memcpy, but it is known that its
1209 |     // defining access is the memset about to be removed which immediately
1210 |     // precedes the memcpy.
1211 |     auto *LastDef =
1212 |         cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
1213 |     auto *NewAccess = MSSAU->createMemoryAccessBefore(
1214 |         NewMemSet, LastDef->getDefiningAccess(), LastDef);
1215 |     MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1216 |   }
1217 |
1218 |   eraseInstruction(MemSet);
1219 |   return true;
1220 | }
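
In source-level terms, with hypothetical sizes, the rewrite implemented above is:

    #include <cstring>

    void trim(char *dst, const char *src) {
      std::memset(dst, 0, 128);   // dst_size = 128
      std::memcpy(dst, src, 100); // src_size = 100 overwrites the first bytes
      // ...becomes:
      //   std::memcpy(dst, src, 100);
      //   std::memset(dst + 100, 0, 28); // only the 28 trailing bytes
    }
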
1221 |
1222 | /// Determine whether the instruction has undefined content for the given Size,
1223 | /// either because it was freshly alloca'd or started its lifetime.
1224 | static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
1225 |   if (isa<AllocaInst>(I))
1226 |     return true;
1227 |
1228 |   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1229 |     if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1230 |       if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1231 |         if (LTSize->getZExtValue() >= Size->getZExtValue())
1232 |           return true;
1233 |
1234 |   return false;
1235 | }
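
A sketch of the situation this predicate recognizes (hypothetical code; reading the uninitialized buffer is exactly the undefined behavior the pass exploits):

    #include <cstring>

    void drop(char *dst) {
      char tmp[16]; // freshly alloca'd, never written
      // The copy only moves undefined bytes, so the pass may delete it.
      std::memcpy(dst, tmp, 16);
    }
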
1236 |
1237 | static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
1238 |                                  MemoryDef *Def, ConstantInt *Size) {
1239 |   if (MSSA->isLiveOnEntryDef(Def))
1240 |     return isa<AllocaInst>(getUnderlyingObject(V));
1241 |
1242 |   if (IntrinsicInst *II =
1243 |           dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
1244 |     if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
1245 |       ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
1246 |       if (AA->isMustAlias(V, II->getArgOperand(1)) &&
1247 |           LTSize->getZExtValue() >= Size->getZExtValue())
1248 |         return true;
1249 |     }
1250 |   }
1251 |
1252 |   return false;
1253 | }
1254 |
1255 | /// Transform memcpy to memset when its source was just memset.
1256 | /// In other words, turn:
1257 | /// \code
1258 | ///   memset(dst1, c, dst1_size);
1259 | ///   memcpy(dst2, dst1, dst2_size);
1260 | /// \endcode
1261 | /// into:
1262 | /// \code
1263 | ///   memset(dst1, c, dst1_size);
1264 | ///   memset(dst2, c, dst2_size);
1265 | /// \endcode
1266 | /// When dst2_size <= dst1_size.
1267 | ///
1268 | /// The \p MemCpy must have a Constant length.
1269 | bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1270 |                                                MemSetInst *MemSet) {
1271 |   // Make sure that memcpy(..., memset(...), ...), that is, we are memsetting
1272 |   // and memcpying from the same address. Otherwise it is hard to reason about.
1273 |   if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
1274 |     return false;
1275 |
1276 |   // A known memset size is required.
1277 |   ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
1278 |   if (!MemSetSize)
1279 |     return false;
1280 |
1281 |   // Make sure the memcpy doesn't read any more than what the memset wrote.
1282 |   // Don't worry about sizes larger than i64.
1283 |   ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
1284 |   if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
1285 |     // If the memcpy is larger than the memset, but the memory was undef prior
1286 |     // to the memset, we can just ignore the tail. Technically we're only
1287 |     // interested in the bytes from MemSetSize..CopySize here, but as we can't
1288 |     // easily represent this location, we use the full 0..CopySize range.
1289 |     MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
1290 |     bool CanReduceSize = false;
1291 |     if (EnableMemorySSA) {
1292 |       MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
1293 |       MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
1294 |           MemSetAccess->getDefiningAccess(), MemCpyLoc);
1295 |       if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1296 |         if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize))
1297 |           CanReduceSize = true;
1298 |     } else {
1299 |       MemDepResult DepInfo = MD->getPointerDependencyFrom(
1300 |           MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
1301 |       if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
1302 |         CanReduceSize = true;
1303 |     }
1304 |
1305 |     if (!CanReduceSize)
1306 |       return false;
1307 |     CopySize = MemSetSize;
1308 |   }
1309 |
1310 |   IRBuilder<> Builder(MemCpy);
1311 |   Instruction *NewM =
1312 |       Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
1313 |                            CopySize, MaybeAlign(MemCpy->getDestAlignment()));
1314 |   if (MSSAU) {
1315 |     auto *LastDef =
1316 |         cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
1317 |     auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1318 |     MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1319 |   }
1320 |
1321 |   return true;
1322 | }
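
A sketch of the rewrite above with hypothetical constants; 48 <= 64, so the copy reads only memset'd bytes:

    #include <cstring>

    void fold(char *dst1, char *dst2) {
      std::memset(dst1, 0x2a, 64);
      std::memcpy(dst2, dst1, 48); // becomes std::memset(dst2, 0x2a, 48)
      // dst1 is then no longer read here, which can let DSE remove the
      // first memset as well if nothing else uses it.
    }
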
1323 |
1324 | /// Perform simplification of memcpy's. If we have memcpy A
1325 | /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1326 | /// B to be a memcpy from X to Z (or potentially a memmove, depending on
1327 | /// circumstances). This allows later passes to remove the first memcpy
1328 | /// altogether.
1329 | bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
1330 |   // We can only optimize non-volatile memcpy's.
1331 |   if (M->isVolatile()) return false;
1332 |
1333 |   // If the source and destination of the memcpy are the same, then zap it.
1334 |   if (M->getSource() == M->getDest()) {
1335 |     ++BBI;
1336 |     eraseInstruction(M);
1337 |     return true;
1338 |   }
1339 |
1340 |   // If copying from a constant, try to turn the memcpy into a memset.
1341 |   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
1342 |     if (GV->isConstant() && GV->hasDefinitiveInitializer())
1343 |       if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
1344 |                                            M->getModule()->getDataLayout())) {
1345 |         IRBuilder<> Builder(M);
1346 |         Instruction *NewM =
1347 |             Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
1348 |                                  MaybeAlign(M->getDestAlignment()), false);
1349 |         if (MSSAU) {
1350 |           auto *LastDef =
1351 |               cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
1352 |           auto *NewAccess =
1353 |               MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1354 |           MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1355 |         }
1356 |
1357 |         eraseInstruction(M);
1358 |         ++NumCpyToSet;
1359 |         return true;
1360 |       }
1361 |
1362 |   if (EnableMemorySSA) {
1363 |     MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
1364 |     MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
1365 |     MemoryLocation DestLoc = MemoryLocation::getForDest(M);
1366 |     const MemoryAccess *DestClobber =
1367 |         MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);
1368 |
1369 |     // Try to turn a partially redundant memset + memcpy into
1370 |     // memcpy + smaller memset. We don't need the memcpy size for this.
1371 |     // The memcpy must post-dominate the memset, so limit this to the same
1372 |     // basic block. A non-local generalization is likely not worthwhile.
1373 |     if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
1374 |       if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
1375 |         if (DestClobber->getBlock() == M->getParent())
1376 |           if (processMemSetMemCpyDependence(M, MDep))
1377 |             return true;
1378 |
1379 |     // The optimizations after this point require the memcpy size.
1380 |     ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
1381 |     if (!CopySize) return false;
1382 |
1383 |     MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
1384 |         AnyClobber, MemoryLocation::getForSource(M));
1385 |
1386 |     // There are four possible optimizations we can do for memcpy:
1387 |     //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1388 |     //   b) call-memcpy xform for return slot optimization.
1389 |     //   c) memcpy from freshly alloca'd space or space that has just started
1390 |     //      its lifetime copies undefined data, and we can therefore eliminate
1391 |     //      the memcpy in favor of the data that was already at the destination.
1392 |     //   d) memcpy from a just-memset'd source can be turned into memset.
1393 |     if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
1394 |       if (Instruction *MI = MD->getMemoryInst()) {
1395 |         if (auto *C = dyn_cast<CallInst>(MI)) {
1396 |           // The memcpy must post-dominate the call. Limit to the same
1397 |           // block for now. Additionally, we need to ensure that there are
1398 |           // no accesses to dest between the call and the memcpy. Accesses
1399 |           // to src will be checked by performCallSlotOptzn().
1400 |           // TODO: Support non-local call-slot optimization?
1401 |           if (C->getParent() == M->getParent() &&
1402 |               !accessedBetween(*AA, DestLoc, MD, MA)) {
1403 |             // FIXME: Can we pass in either of dest/src alignment here instead
1404 |             // of conservatively taking the minimum?
1405 |             Align Alignment = std::min(M->getDestAlign().valueOrOne(),
1406 |                                        M->getSourceAlign().valueOrOne());
1407 |             if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
1408 |                                      CopySize->getZExtValue(), Alignment, C)) {
1409 |               LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
1410 |                                 << "    call: " << *C << "\n"
1411 |                                 << "    memcpy: " << *M << "\n");
1412 |               eraseInstruction(M);
1413 |               ++NumMemCpyInstr;
1414 |               return true;
1415 |             }
1416 |           }
1417 |         }
1418 |         if (auto *MDep = dyn_cast<MemCpyInst>(MI))
1419 |           return processMemCpyMemCpyDependence(M, MDep);
1420 |         if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
1421 |           if (performMemCpyToMemSetOptzn(M, MDep)) {
1422 |             LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
1423 |             eraseInstruction(M);
1424 |             ++NumCpyToSet;
1425 |             return true;
1426 |           }
1427 |         }
1428 |       }
1429 |
1430 |       if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, CopySize)) {
1431 |         LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
1432 |         eraseInstruction(M);
1433 |         ++NumMemCpyInstr;
1434 |         return true;
1435 |       }
1436 |     }
1437 |   } else {
1438 |     MemDepResult DepInfo = MD->getDependency(M);
1439 |
1440 |     // Try to turn a partially redundant memset + memcpy into
1441 |     // memcpy + smaller memset. We don't need the memcpy size for this.
1442 |     if (DepInfo.isClobber())
1443 |       if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
1444 |         if (processMemSetMemCpyDependence(M, MDep))
1445 |           return true;
1446 |
1447 |     // The optimizations after this point require the memcpy size.
1448 |     ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
1449 |     if (!CopySize) return false;
1450 |
1451 |     // There are four possible optimizations we can do for memcpy:
1452 |     //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1453 |     //   b) call-memcpy xform for return slot optimization.
1454 |     //   c) memcpy from freshly alloca'd space or space that has just started
1455 |     //      its lifetime copies undefined data, and we can therefore eliminate
1456 |     //      the memcpy in favor of the data that was already at the destination.
1457 |     //   d) memcpy from a just-memset'd source can be turned into memset.
1458 |     if (DepInfo.isClobber()) {
1459 |       if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
1460 |         // FIXME: Can we pass in either of dest/src alignment here instead
1461 |         // of conservatively taking the minimum?
1462 |         Align Alignment = std::min(M->getDestAlign().valueOrOne(),
1463 |                                    M->getSourceAlign().valueOrOne());
1464 |         if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
1465 |                                  CopySize->getZExtValue(), Alignment, C)) {
1466 |           eraseInstruction(M);
1467 |           ++NumMemCpyInstr;
1468 |           return true;
1469 |         }
1470 |       }
1471 |     }
1472 |
1473 |     MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
1474 |     MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1475 |         SrcLoc, true, M->getIterator(), M->getParent());
1476 |
1477 |     if (SrcDepInfo.isClobber()) {
1478 |       if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
1479 |         return processMemCpyMemCpyDependence(M, MDep);
1480 |     } else if (SrcDepInfo.isDef()) {
1481 |       if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
1482 |         eraseInstruction(M);
1483 |         ++NumMemCpyInstr;
1484 |         return true;
1485 |       }
1486 |     }
1487 |
1488 |     if (SrcDepInfo.isClobber())
1489 |       if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1490 |         if (performMemCpyToMemSetOptzn(M, MDep)) {
1491 |           eraseInstruction(M);
1492 |           ++NumCpyToSet;
1493 |           return true;
1494 |         }
1495 |   }
1496 |
1497 |   return false;
1498 | }
1499 |
1500 | /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1501 | /// not to alias.
1502 | bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1503 |   if (!TLI->has(LibFunc_memmove))
1504 |     return false;
1505 |
1506 |   // See if the pointers alias.
1507 |   if (!AA->isNoAlias(MemoryLocation::getForDest(M),
1508 |                      MemoryLocation::getForSource(M)))
1509 |     return false;
1510 |
1511 |   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1512 |                     << "\n");
1513 |
1514 |   // If not, then we know we can transform this.
1515 |   Type *ArgTys[3] = { M->getRawDest()->getType(),
1516 |                       M->getRawSource()->getType(),
1517 |                       M->getLength()->getType() };
1518 |   M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1519 |                                                  Intrinsic::memcpy, ArgTys));
1520 |
1521 |   // For MemorySSA nothing really changes (except that memcpy may imply
1522 |   // stricter aliasing guarantees).
1523 |
1524 |   // MemDep may have overly conservative information about this instruction;
1525 |   // just conservatively flush it from the cache.
1526 |   if (MD)
1527 |     MD->removeInstruction(M);
1528 |
1529 |   ++NumMoveToCpy;
1530 |   return true;
1531 | }
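
A sketch of the strengthening above, assuming a buffer whose halves provably do not overlap:

    #include <cstring>

    void strengthen(char *buf) {
      // [buf, buf+64) and [buf+64, buf+128) are disjoint, so alias analysis
      // can prove no overlap and the memmove becomes the cheaper memcpy:
      std::memmove(buf, buf + 64, 64); // -> std::memcpy(buf, buf + 64, 64)
    }
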
1532 |
1533 | /// This is called on every byval argument in call sites.
1534 | bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
1535 |   const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
1536 |   // Find out what feeds this byval argument.
1537 |   Value *ByValArg = CB.getArgOperand(ArgNo);
1538 |   Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
1539 |   uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
1540 |   MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
1541 |   MemCpyInst *MDep = nullptr;
1542 |   if (EnableMemorySSA) {
1543 |     MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
1544 |     MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
1545 |         CallAccess->getDefiningAccess(), Loc);
1546 |     if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1547 |       MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());
1548 |   } else {
1549 |     MemDepResult DepInfo = MD->getPointerDependencyFrom(
1550 |         Loc, true, CB.getIterator(), CB.getParent());
1551 |     if (!DepInfo.isClobber())
1552 |       return false;
1553 |     MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
1554 |   }
1555 |
1556 |   // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
1557 |   // a memcpy, see if we can byval from the source of the memcpy instead of the
1558 |   // result.
1559 |   if (!MDep || MDep->isVolatile() ||
1560 |       ByValArg->stripPointerCasts() != MDep->getDest())
1561 |     return false;
1562 |
1563 |   // The length of the memcpy must be larger than or equal to the size of the byval.
1564 |   ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
1565 |   if (!C1 || C1->getValue().getZExtValue() < ByValSize)
1566 |     return false;
1567 |
1568 |   // Get the alignment of the byval. If the call doesn't specify the alignment,
1569 |   // then it is some target-specific value that we can't know.
1570 |   MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
1571 |   if (!ByValAlign) return false;
1572 |
1573 |   // If it is greater than the memcpy, then we check to see if we can force the
1574 |   // source of the memcpy to the alignment we need. If we fail, we bail out.
1575 |   MaybeAlign MemDepAlign = MDep->getSourceAlign();
1576 |   if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
1577 |       getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
1578 |                                  DT) < *ByValAlign)
1579 |     return false;
1580 |
1581 |   // The address space of the memcpy source must match the byval argument.
1582 |   if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1583 |       ByValArg->getType()->getPointerAddressSpace())
1584 |     return false;
1585 |
1586 |   // Verify that the copied-from memory doesn't change in between the memcpy and
1587 |   // the byval call.
1588 |   //    memcpy(a <- b)
1589 |   //    *b = 42;
1590 |   //    foo(*a)
1591 |   // It would be invalid to transform the second memcpy into foo(*b).
1592 |   if (EnableMemorySSA) {
1593 |     if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
1594 |                        MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB)))
1595 |       return false;
1596 |   } else {
1597 |     // NOTE: This is conservative, it will stop on any read from the source loc,
1598 |     // not just the defining memcpy.
1599 |     MemDepResult SourceDep = MD->getPointerDependencyFrom(
1600 |         MemoryLocation::getForSource(MDep), false,
1601 |         CB.getIterator(), MDep->getParent());
1602 |     if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1603 |       return false;
1604 |   }
1605 |
1606 |   Value *TmpCast = MDep->getSource();
1607 |   if (MDep->getSource()->getType() != ByValArg->getType()) {
1608 |     BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1609 |                                               "tmpcast", &CB);
1610 |     // Set the tmpcast's DebugLoc to MDep's.
1611 |     TmpBitCast->setDebugLoc(MDep->getDebugLoc());
1612 |     TmpCast = TmpBitCast;
1613 |   }
1614 |
1615 |   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1616 |                     << "  " << *MDep << "\n"
1617 |                     << "  " << CB << "\n");
1618 |
1619 |   // Otherwise we're good! Update the byval argument.
1620 |   CB.setArgOperand(ArgNo, TmpCast);
1621 |   ++NumMemCpyInstr;
1622 |   return true;
1623 | }
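
A sketch of the byval forwarding above; `Arg` and `callee` are hypothetical, and the frontend is assumed to lower the by-value parameter as a byval pointer fed by a memcpy from a local:

    #include <cstring>

    struct Arg { char Bytes[32]; };
    void callee(Arg A); // passed byval: the call site owns a copy

    void caller(Arg *Src) {
      Arg Tmp;
      std::memcpy(&Tmp, Src, sizeof(Arg)); // feeds the byval argument
      callee(Tmp); // after the transform the call copies from *Src
    }              // directly, and Tmp becomes dead
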
1624 |
1625 | /// Executes one iteration of MemCpyOptPass.
1626 | bool MemCpyOptPass::iterateOnFunction(Function &F) {
1627 |   bool MadeChange = false;
1628 |
1629 |   // Walk all instructions in the function.
1630 |   for (BasicBlock &BB : F) {
1631 |     // Skip unreachable blocks. For example processStore assumes that an
1632 |     // instruction in a BB can't be dominated by a later instruction in the
1633 |     // same BB (which is a scenario that can happen for an unreachable BB that
1634 |     // has itself as a predecessor).
1635 |     if (!DT->isReachableFromEntry(&BB))
1636 |       continue;
1637 |
1638 |     for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
1639 |       // Avoid invalidating the iterator.
1640 |       Instruction *I = &*BI++;
1641 |
1642 |       bool RepeatInstruction = false;
1643 |
1644 |       if (StoreInst *SI = dyn_cast<StoreInst>(I))
1645 |         MadeChange |= processStore(SI, BI);
1646 |       else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1647 |         RepeatInstruction = processMemSet(M, BI);
1648 |       else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
1649 |         RepeatInstruction = processMemCpy(M, BI);
1650 |       else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
1651 |         RepeatInstruction = processMemMove(M);
1652 |       else if (auto *CB = dyn_cast<CallBase>(I)) {
1653 |         for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
1654 |           if (CB->isByValArgument(i))
1655 |             MadeChange |= processByValArgument(*CB, i);
1656 |       }
1657 |
1658 |       // Reprocess the instruction if desired.
1659 |       if (RepeatInstruction) {
1660 |         if (BI != BB.begin())
1661 |           --BI;
1662 |         MadeChange = true;
1663 |       }
1664 |     }
1665 |   }
1666 |
1667 |   return MadeChange;
1668 | }
1669 |
1670 | PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
1671 |   auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F)
1672 |                               : AM.getCachedResult<MemoryDependenceAnalysis>(F);
1673 |   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1674 |   auto *AA = &AM.getResult<AAManager>(F);
1675 |   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
1676 |   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1677 |   auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F)
1678 |                                : AM.getCachedResult<MemorySSAAnalysis>(F);
1679 |
1680 |   bool MadeChange =
1681 |       runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
1682 |   if (!MadeChange)
1683 |     return PreservedAnalyses::all();
1684 |
1685 |   PreservedAnalyses PA;
1686 |   PA.preserveSet<CFGAnalyses>();
1687 |   PA.preserve<GlobalsAA>();
1688 |   if (MD)
1689 |     PA.preserve<MemoryDependenceAnalysis>();
1690 |   if (MSSA)
1691 |     PA.preserve<MemorySSAAnalysis>();
1692 |   return PA;
1693 | }
1694 |
1695 | bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
1696 |                             TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
1697 |                             AssumptionCache *AC_, DominatorTree *DT_,
1698 |                             MemorySSA *MSSA_) {
1699 |   bool MadeChange = false;
1700 |   MD = MD_;
1701 |   TLI = TLI_;
1702 |   AA = AA_;
1703 |   AC = AC_;
1704 |   DT = DT_;
1705 |   MSSA = MSSA_;
1706 |   MemorySSAUpdater MSSAU_(MSSA_);
1707 |   MSSAU = MSSA_ ? &MSSAU_ : nullptr;
1708 |   // If we don't have at least memset and memcpy, there is little point in
1709 |   // doing anything here. These are required by a freestanding implementation,
1710 |   // so if even they are disabled, there is no point in trying hard.
1711 |   if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
1712 |     return false;
1713 |
1714 |   while (true) {
1715 |     if (!iterateOnFunction(F))
1716 |       break;
1717 |     MadeChange = true;
1718 |   }
1719 |
1720 |   if (MSSA_ && VerifyMemorySSA)
1721 |     MSSA_->verifyMemorySSA();
1722 |
1723 |   MD = nullptr;
1724 |   return MadeChange;
1725 | }
1726 |
1727 | /// This is the main transformation entry point for a function.
1728 | bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1729 |   if (skipFunction(F))
1730 |     return false;
1731 |
1732 |   auto *MDWP = !EnableMemorySSA
1733 |                    ? &getAnalysis<MemoryDependenceWrapperPass>()
1734 |                    : getAnalysisIfAvailable<MemoryDependenceWrapperPass>();
1735 |   auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1736 |   auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1737 |   auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1738 |   auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1739 |   auto *MSSAWP = EnableMemorySSA
1740 |                      ? &getAnalysis<MemorySSAWrapperPass>()
1741 |                      : getAnalysisIfAvailable<MemorySSAWrapperPass>();
1742 |
1743 |   return Impl.runImpl(F, MDWP ? &MDWP->getMemDep() : nullptr, TLI, AA, AC, DT,
1744 |                       MSSAWP ? &MSSAWP->getMSSA() : nullptr);
1745 | }
1 | //===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines classes that make it really easy to deal with intrinsic |
10 | // functions with the isa/dyncast family of functions. In particular, this |
11 | // allows you to do things like: |
12 | // |
13 | // if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst)) |
14 | // ... MCI->getDest() ... MCI->getSource() ... |
15 | // |
16 | // All intrinsic function calls are instances of the call instruction, so these |
17 | // are all subclasses of the CallInst class. Note that none of these classes |
18 | // has state or virtual methods, which is an important part of this gross/neat |
19 | // hack working. |
20 | // |
21 | //===----------------------------------------------------------------------===// |
22 | |
23 | #ifndef LLVM_IR_INTRINSICINST_H |
24 | #define LLVM_IR_INTRINSICINST_H |
25 | |
26 | #include "llvm/IR/Constants.h" |
27 | #include "llvm/IR/DerivedTypes.h" |
28 | #include "llvm/IR/FPEnv.h" |
29 | #include "llvm/IR/Function.h" |
30 | #include "llvm/IR/GlobalVariable.h" |
31 | #include "llvm/IR/Instructions.h" |
32 | #include "llvm/IR/Intrinsics.h" |
33 | #include "llvm/IR/Metadata.h" |
34 | #include "llvm/IR/Value.h" |
35 | #include "llvm/Support/Casting.h" |
36 | #include <cassert> |
37 | #include <cstdint> |
38 | |
39 | namespace llvm { |
40 | |
41 | /// A wrapper class for inspecting calls to intrinsic functions. |
42 | /// This allows the standard isa/dyncast/cast functionality to work with calls |
43 | /// to intrinsic functions. |
44 | class IntrinsicInst : public CallInst { |
45 | public: |
46 | IntrinsicInst() = delete; |
47 | IntrinsicInst(const IntrinsicInst &) = delete; |
48 | IntrinsicInst &operator=(const IntrinsicInst &) = delete; |
49 | |
50 | /// Return the intrinsic ID of this intrinsic. |
51 | Intrinsic::ID getIntrinsicID() const { |
52 | return getCalledFunction()->getIntrinsicID(); |
53 | } |
54 | |
55 | /// Return true if swapping the first two arguments to the intrinsic produces |
56 | /// the same result. |
57 | bool isCommutative() const { |
58 | switch (getIntrinsicID()) { |
59 | case Intrinsic::maxnum: |
60 | case Intrinsic::minnum: |
61 | case Intrinsic::maximum: |
62 | case Intrinsic::minimum: |
63 | case Intrinsic::smax: |
64 | case Intrinsic::smin: |
65 | case Intrinsic::umax: |
66 | case Intrinsic::umin: |
67 | case Intrinsic::sadd_sat: |
68 | case Intrinsic::uadd_sat: |
69 | case Intrinsic::sadd_with_overflow: |
70 | case Intrinsic::uadd_with_overflow: |
71 | case Intrinsic::smul_with_overflow: |
72 | case Intrinsic::umul_with_overflow: |
73 | case Intrinsic::smul_fix: |
74 | case Intrinsic::umul_fix: |
75 | case Intrinsic::smul_fix_sat: |
76 | case Intrinsic::umul_fix_sat: |
77 | case Intrinsic::fma: |
78 | case Intrinsic::fmuladd: |
79 | return true; |
80 | default: |
81 | return false; |
82 | } |
83 | } |
84 | |
85 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
86 | static bool classof(const CallInst *I) { |
87 | if (const Function *CF = I->getCalledFunction()) |
88 | return CF->isIntrinsic(); |
89 | return false; |
90 | } |
91 | static bool classof(const Value *V) { |
92 | return isa<CallInst>(V) && classof(cast<CallInst>(V)); |
93 | } |
94 | }; |
95 | |
96 | /// Check if \p ID corresponds to a debug info intrinsic. |
97 | static inline bool isDbgInfoIntrinsic(Intrinsic::ID ID) { |
98 | switch (ID) { |
99 | case Intrinsic::dbg_declare: |
100 | case Intrinsic::dbg_value: |
101 | case Intrinsic::dbg_addr: |
102 | case Intrinsic::dbg_label: |
103 | return true; |
104 | default: |
105 | return false; |
106 | } |
107 | } |
108 | |
109 | /// This is the common base class for debug info intrinsics. |
110 | class DbgInfoIntrinsic : public IntrinsicInst { |
111 | public: |
112 | /// \name Casting methods |
113 | /// @{ |
114 | static bool classof(const IntrinsicInst *I) { |
115 | return isDbgInfoIntrinsic(I->getIntrinsicID()); |
116 | } |
117 | static bool classof(const Value *V) { |
118 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
119 | } |
120 | /// @} |
121 | }; |
122 | |
123 | /// This is the common base class for debug info intrinsics for variables. |
124 | class DbgVariableIntrinsic : public DbgInfoIntrinsic { |
125 | public: |
126 | /// Get the location corresponding to the variable referenced by the debug |
127 | /// info intrinsic. Depending on the intrinsic, this could be the |
128 | /// variable's value or its address. |
129 | Value *getVariableLocation(bool AllowNullOp = true) const; |
130 | |
131 | /// Does this describe the address of a local variable. True for dbg.addr |
132 | /// and dbg.declare, but not dbg.value, which describes its value. |
133 | bool isAddressOfVariable() const { |
134 | return getIntrinsicID() != Intrinsic::dbg_value; |
135 | } |
136 | |
137 | DILocalVariable *getVariable() const { |
138 | return cast<DILocalVariable>(getRawVariable()); |
139 | } |
140 | |
141 | DIExpression *getExpression() const { |
142 | return cast<DIExpression>(getRawExpression()); |
143 | } |
144 | |
145 | Metadata *getRawVariable() const { |
146 | return cast<MetadataAsValue>(getArgOperand(1))->getMetadata(); |
147 | } |
148 | |
149 | Metadata *getRawExpression() const { |
150 | return cast<MetadataAsValue>(getArgOperand(2))->getMetadata(); |
151 | } |
152 | |
153 | /// Get the size (in bits) of the variable, or fragment of the variable that |
154 | /// is described. |
155 | Optional<uint64_t> getFragmentSizeInBits() const; |
156 | |
157 | /// \name Casting methods |
158 | /// @{ |
159 | static bool classof(const IntrinsicInst *I) { |
160 | switch (I->getIntrinsicID()) { |
161 | case Intrinsic::dbg_declare: |
162 | case Intrinsic::dbg_value: |
163 | case Intrinsic::dbg_addr: |
164 | return true; |
165 | default: |
166 | return false; |
167 | } |
168 | } |
169 | static bool classof(const Value *V) { |
170 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
171 | } |
172 | /// @} |
173 | }; |
174 | |
175 | /// This represents the llvm.dbg.declare instruction. |
176 | class DbgDeclareInst : public DbgVariableIntrinsic { |
177 | public: |
178 | Value *getAddress() const { return getVariableLocation(); } |
179 | |
180 | /// \name Casting methods |
181 | /// @{ |
182 | static bool classof(const IntrinsicInst *I) { |
183 | return I->getIntrinsicID() == Intrinsic::dbg_declare; |
184 | } |
185 | static bool classof(const Value *V) { |
186 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
187 | } |
188 | /// @} |
189 | }; |
190 | |
191 | /// This represents the llvm.dbg.addr instruction. |
192 | class DbgAddrIntrinsic : public DbgVariableIntrinsic { |
193 | public: |
194 | Value *getAddress() const { return getVariableLocation(); } |
195 | |
196 | /// \name Casting methods |
197 | /// @{ |
198 | static bool classof(const IntrinsicInst *I) { |
199 | return I->getIntrinsicID() == Intrinsic::dbg_addr; |
200 | } |
201 | static bool classof(const Value *V) { |
202 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
203 | } |
204 | }; |
205 | |
206 | /// This represents the llvm.dbg.value instruction. |
207 | class DbgValueInst : public DbgVariableIntrinsic { |
208 | public: |
209 | Value *getValue() const { |
210 | return getVariableLocation(/* AllowNullOp = */ false); |
211 | } |
212 | |
213 | /// \name Casting methods |
214 | /// @{ |
215 | static bool classof(const IntrinsicInst *I) { |
216 | return I->getIntrinsicID() == Intrinsic::dbg_value; |
217 | } |
218 | static bool classof(const Value *V) { |
219 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
220 | } |
221 | /// @} |
222 | }; |
223 | |
224 | /// This represents the llvm.dbg.label instruction. |
225 | class DbgLabelInst : public DbgInfoIntrinsic { |
226 | public: |
227 | DILabel *getLabel() const { return cast<DILabel>(getRawLabel()); } |
228 | |
229 | Metadata *getRawLabel() const { |
230 | return cast<MetadataAsValue>(getArgOperand(0))->getMetadata(); |
231 | } |
232 | |
233 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
234 | /// @{ |
235 | static bool classof(const IntrinsicInst *I) { |
236 | return I->getIntrinsicID() == Intrinsic::dbg_label; |
237 | } |
238 | static bool classof(const Value *V) { |
239 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
240 | } |
241 | /// @} |
242 | }; |
243 | |
244 | /// This is the common base class for vector predication intrinsics. |
245 | class VPIntrinsic : public IntrinsicInst { |
246 | public: |
247 | static Optional<int> GetMaskParamPos(Intrinsic::ID IntrinsicID); |
248 | static Optional<int> GetVectorLengthParamPos(Intrinsic::ID IntrinsicID); |
249 | |
250 | /// The llvm.vp.* intrinsics for this instruction Opcode |
251 | static Intrinsic::ID GetForOpcode(unsigned OC); |
252 | |
253 | // Whether \p ID is a VP intrinsic ID. |
254 | static bool IsVPIntrinsic(Intrinsic::ID); |
255 | |
256 | /// \return the mask parameter or nullptr. |
257 | Value *getMaskParam() const; |
258 | |
259 | /// \return the vector length parameter or nullptr. |
260 | Value *getVectorLengthParam() const; |
261 | |
262 | /// \return whether the vector length param can be ignored. |
263 | bool canIgnoreVectorLengthParam() const; |
264 | |
265 | /// \return the static element count (vector number of elements) the vector |
266 | /// length parameter applies to. |
267 | ElementCount getStaticVectorLength() const; |
268 | |
269 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
270 | static bool classof(const IntrinsicInst *I) { |
271 | return IsVPIntrinsic(I->getIntrinsicID()); |
272 | } |
273 | static bool classof(const Value *V) { |
274 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
275 | } |
276 | |
277 | // Equivalent non-predicated opcode |
278 | unsigned getFunctionalOpcode() const { |
279 | return GetFunctionalOpcodeForVP(getIntrinsicID()); |
280 | } |
281 | |
282 | // Equivalent non-predicated opcode |
283 | static unsigned GetFunctionalOpcodeForVP(Intrinsic::ID ID); |
284 | }; |
285 | |
286 | /// This is the common base class for constrained floating point intrinsics. |
287 | class ConstrainedFPIntrinsic : public IntrinsicInst { |
288 | public: |
289 | bool isUnaryOp() const; |
290 | bool isTernaryOp() const; |
291 | Optional<RoundingMode> getRoundingMode() const; |
292 | Optional<fp::ExceptionBehavior> getExceptionBehavior() const; |
293 | |
294 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
295 | static bool classof(const IntrinsicInst *I); |
296 | static bool classof(const Value *V) { |
297 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
298 | } |
299 | }; |
300 | |
301 | /// Constrained floating point compare intrinsics. |
302 | class ConstrainedFPCmpIntrinsic : public ConstrainedFPIntrinsic { |
303 | public: |
304 | FCmpInst::Predicate getPredicate() const; |
305 | |
306 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
307 | static bool classof(const IntrinsicInst *I) { |
308 | switch (I->getIntrinsicID()) { |
309 | case Intrinsic::experimental_constrained_fcmp: |
310 | case Intrinsic::experimental_constrained_fcmps: |
311 | return true; |
312 | default: |
313 | return false; |
314 | } |
315 | } |
316 | static bool classof(const Value *V) { |
317 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
318 | } |
319 | }; |
320 | |
321 | /// This class represents an intrinsic that is based on a binary operation. |
322 | /// This includes op.with.overflow and saturating add/sub intrinsics. |
323 | class BinaryOpIntrinsic : public IntrinsicInst { |
324 | public: |
325 | static bool classof(const IntrinsicInst *I) { |
326 | switch (I->getIntrinsicID()) { |
327 | case Intrinsic::uadd_with_overflow: |
328 | case Intrinsic::sadd_with_overflow: |
329 | case Intrinsic::usub_with_overflow: |
330 | case Intrinsic::ssub_with_overflow: |
331 | case Intrinsic::umul_with_overflow: |
332 | case Intrinsic::smul_with_overflow: |
333 | case Intrinsic::uadd_sat: |
334 | case Intrinsic::sadd_sat: |
335 | case Intrinsic::usub_sat: |
336 | case Intrinsic::ssub_sat: |
337 | return true; |
338 | default: |
339 | return false; |
340 | } |
341 | } |
342 | static bool classof(const Value *V) { |
343 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
344 | } |
345 | |
346 | Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); } |
347 | Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); } |
348 | |
349 | /// Returns the binary operation underlying the intrinsic. |
350 | Instruction::BinaryOps getBinaryOp() const; |
351 | |
352 | /// Whether the intrinsic is signed or unsigned. |
353 | bool isSigned() const; |
354 | |
355 | /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap. |
356 | unsigned getNoWrapKind() const; |
357 | }; |
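
A minimal usage sketch (not part of this header) of the interface above; `describe` is a hypothetical helper one might write in a pass:

    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    static void describe(IntrinsicInst &I) {
      if (auto *BO = dyn_cast<BinaryOpIntrinsic>(&I)) {
        Value *L = BO->getLHS();                       // first argument
        Value *R = BO->getRHS();                       // second argument
        Instruction::BinaryOps Op = BO->getBinaryOp(); // e.g. Instruction::Add
        bool IsSigned = BO->isSigned();                // sadd.* vs. uadd.* etc.
        (void)L; (void)R; (void)Op; (void)IsSigned;
      }
    }
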
358 | |
359 | /// Represents an op.with.overflow intrinsic. |
360 | class WithOverflowInst : public BinaryOpIntrinsic { |
361 | public: |
362 | static bool classof(const IntrinsicInst *I) { |
363 | switch (I->getIntrinsicID()) { |
364 | case Intrinsic::uadd_with_overflow: |
365 | case Intrinsic::sadd_with_overflow: |
366 | case Intrinsic::usub_with_overflow: |
367 | case Intrinsic::ssub_with_overflow: |
368 | case Intrinsic::umul_with_overflow: |
369 | case Intrinsic::smul_with_overflow: |
370 | return true; |
371 | default: |
372 | return false; |
373 | } |
374 | } |
375 | static bool classof(const Value *V) { |
376 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
377 | } |
378 | }; |
379 | |
380 | /// Represents a saturating add/sub intrinsic. |
381 | class SaturatingInst : public BinaryOpIntrinsic { |
382 | public: |
383 | static bool classof(const IntrinsicInst *I) { |
384 | switch (I->getIntrinsicID()) { |
385 | case Intrinsic::uadd_sat: |
386 | case Intrinsic::sadd_sat: |
387 | case Intrinsic::usub_sat: |
388 | case Intrinsic::ssub_sat: |
389 | return true; |
390 | default: |
391 | return false; |
392 | } |
393 | } |
394 | static bool classof(const Value *V) { |
395 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
396 | } |
397 | }; |
398 | |
399 | /// Common base class for all memory intrinsics. Simply provides |
400 | /// common methods. |
401 | /// Written as CRTP to avoid a common base class amongst the |
402 | /// three atomicity hierarchies. |
403 | template <typename Derived> class MemIntrinsicBase : public IntrinsicInst { |
404 | private: |
405 | enum { ARG_DEST = 0, ARG_LENGTH = 2 }; |
406 | |
407 | public: |
408 | Value *getRawDest() const { |
409 | return const_cast<Value *>(getArgOperand(ARG_DEST)); |
410 | } |
411 | const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); } |
412 | Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); } |
413 | |
414 | Value *getLength() const { |
415 | return const_cast<Value *>(getArgOperand(ARG_LENGTH)); |
416 | } |
417 | const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); } |
418 | Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); } |
419 | |
420 | /// This is just like getRawDest, but it strips off any cast |
421 | /// instructions (including addrspacecast) that feed it, giving the |
422 | /// original input. The returned value is guaranteed to be a pointer. |
423 | Value *getDest() const { return getRawDest()->stripPointerCasts(); } |
424 | |
425 | unsigned getDestAddressSpace() const { |
426 | return cast<PointerType>(getRawDest()->getType())->getAddressSpace(); |
427 | } |
428 | |
429 | /// FIXME: Remove this function once transition to Align is over. |
430 | /// Use getDestAlign() instead. |
431 | unsigned getDestAlignment() const { |
432 | if (auto MA = getParamAlign(ARG_DEST)) |
433 | return MA->value(); |
434 | return 0; |
435 | } |
436 | MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); } |
437 | |
438 | /// Set the specified arguments of the instruction. |
439 | void setDest(Value *Ptr) { |
440 | assert(getRawDest()->getType() == Ptr->getType() &&
441 |        "setDest called with pointer of wrong type!");
442 | setArgOperand(ARG_DEST, Ptr); |
443 | } |
444 | |
445 | /// FIXME: Remove this function once transition to Align is over. |
446 | /// Use the version that takes MaybeAlign instead of this one. |
447 | void setDestAlignment(unsigned Alignment) { |
448 | setDestAlignment(MaybeAlign(Alignment)); |
449 | } |
450 | void setDestAlignment(MaybeAlign Alignment) { |
451 | removeParamAttr(ARG_DEST, Attribute::Alignment); |
452 | if (Alignment) |
453 | addParamAttr(ARG_DEST, |
454 | Attribute::getWithAlignment(getContext(), *Alignment)); |
455 | } |
456 | void setDestAlignment(Align Alignment) { |
457 | removeParamAttr(ARG_DEST, Attribute::Alignment); |
458 | addParamAttr(ARG_DEST, |
459 | Attribute::getWithAlignment(getContext(), Alignment)); |
460 | } |
461 | |
462 | void setLength(Value *L) { |
463 | assert(getLength()->getType() == L->getType() &&
464 |        "setLength called with value of wrong type!");
465 | setArgOperand(ARG_LENGTH, L); |
466 | } |
467 | }; |
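To make the CRTP comment above concrete, a sketch of how the bases below are instantiated by the classes declared later in this header:

    // MemIntrinsicBase<MemIntrinsic>       -> MemIntrinsic       (llvm.mem*)
    // MemIntrinsicBase<AtomicMemIntrinsic> -> AtomicMemIntrinsic (element-atomic)
    // MemIntrinsicBase<AnyMemIntrinsic>    -> AnyMemIntrinsic    (either family)
    // MemTransferBase<...> and MemSetBase<...> then layer the source and value
    // accessors onto each hierarchy without introducing a shared virtual base.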
468 | |
469 | /// Common base class for all memory transfer intrinsics. Simply provides |
470 | /// common methods. |
471 | template <class BaseCL> class MemTransferBase : public BaseCL { |
472 | private: |
473 | enum { ARG_SOURCE = 1 }; |
474 | |
475 | public: |
476 | /// Return the arguments to the instruction. |
477 | Value *getRawSource() const { |
478 | return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE)); |
479 | } |
480 | const Use &getRawSourceUse() const { |
481 | return BaseCL::getArgOperandUse(ARG_SOURCE); |
482 | } |
483 | Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); } |
484 | |
485 | /// This is just like getRawSource, but it strips off any cast |
486 | /// instructions that feed it, giving the original input. The returned |
487 | /// value is guaranteed to be a pointer. |
488 | Value *getSource() const { return getRawSource()->stripPointerCasts(); } |
489 | |
490 | unsigned getSourceAddressSpace() const { |
491 | return cast<PointerType>(getRawSource()->getType())->getAddressSpace(); |
492 | } |
493 | |
494 | /// FIXME: Remove this function once transition to Align is over. |
495 | /// Use getSourceAlign() instead. |
496 | unsigned getSourceAlignment() const { |
497 | if (auto MA = BaseCL::getParamAlign(ARG_SOURCE)) |
498 | return MA->value(); |
499 | return 0; |
500 | } |
501 | |
502 | MaybeAlign getSourceAlign() const { |
503 | return BaseCL::getParamAlign(ARG_SOURCE); |
504 | } |
505 | |
506 | void setSource(Value *Ptr) { |
507 | assert(getRawSource()->getType() == Ptr->getType() &&
508 |        "setSource called with pointer of wrong type!");
509 | BaseCL::setArgOperand(ARG_SOURCE, Ptr); |
510 | } |
511 | |
512 | /// FIXME: Remove this function once transition to Align is over. |
513 | /// Use the version that takes MaybeAlign instead of this one. |
514 | void setSourceAlignment(unsigned Alignment) { |
515 | setSourceAlignment(MaybeAlign(Alignment)); |
516 | } |
517 | void setSourceAlignment(MaybeAlign Alignment) { |
518 | BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment); |
519 | if (Alignment) |
520 | BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment( |
521 | BaseCL::getContext(), *Alignment)); |
522 | } |
523 | void setSourceAlignment(Align Alignment) { |
524 | BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment); |
525 | BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment( |
526 | BaseCL::getContext(), Alignment)); |
527 | } |
528 | }; |
529 | |
530 | /// Common base class for all memset intrinsics. Simply provides |
531 | /// common methods. |
532 | template <class BaseCL> class MemSetBase : public BaseCL { |
533 | private: |
534 | enum { ARG_VALUE = 1 }; |
535 | |
536 | public: |
537 | Value *getValue() const { |
538 | return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE)); |
539 | } |
540 | const Use &getValueUse() const { return BaseCL::getArgOperandUse(ARG_VALUE); } |
541 | Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); } |
542 | |
543 | void setValue(Value *Val) { |
544 | assert(getValue()->getType() == Val->getType() &&
545 |        "setValue called with value of wrong type!");
546 | BaseCL::setArgOperand(ARG_VALUE, Val); |
547 | } |
548 | }; |
549 | |
550 | // The common base class for the atomic memset/memmove/memcpy intrinsics |
551 | // i.e. llvm.element.unordered.atomic.memset/memcpy/memmove |
552 | class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> { |
553 | private: |
554 | enum { ARG_ELEMENTSIZE = 3 }; |
555 | |
556 | public: |
557 | Value *getRawElementSizeInBytes() const { |
558 | return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE)); |
559 | } |
560 | |
561 | ConstantInt *getElementSizeInBytesCst() const { |
562 | return cast<ConstantInt>(getRawElementSizeInBytes()); |
563 | } |
564 | |
565 | uint32_t getElementSizeInBytes() const { |
566 | return getElementSizeInBytesCst()->getZExtValue(); |
567 | } |
568 | |
569 | void setElementSizeInBytes(Constant *V) { |
570 | assert(V->getType() == Type::getInt8Ty(getContext()) &&
571 |        "setElementSizeInBytes called with value of wrong type!");
572 | setArgOperand(ARG_ELEMENTSIZE, V); |
573 | } |
574 | |
575 | static bool classof(const IntrinsicInst *I) { |
576 | switch (I->getIntrinsicID()) { |
577 | case Intrinsic::memcpy_element_unordered_atomic: |
578 | case Intrinsic::memmove_element_unordered_atomic: |
579 | case Intrinsic::memset_element_unordered_atomic: |
580 | return true; |
581 | default: |
582 | return false; |
583 | } |
584 | } |
585 | static bool classof(const Value *V) { |
586 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
587 | } |
588 | }; |
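A short sketch of the element-size contract (illustrative; AMI is an assumed AtomicMemIntrinsic *):

    uint32_t ElemSize = AMI->getElementSizeInBytes(); // operand is a ConstantInt
    if (auto *Len = dyn_cast<ConstantInt>(AMI->getLength()))
      assert(Len->getZExtValue() % ElemSize == 0 &&
             "length must be a multiple of the element size");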
589 | |
590 | /// This class represents the atomic memset intrinsic,
591 | /// i.e. llvm.element.unordered.atomic.memset.
592 | class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> { |
593 | public: |
594 | static bool classof(const IntrinsicInst *I) { |
595 | return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic; |
596 | } |
597 | static bool classof(const Value *V) { |
598 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
599 | } |
600 | }; |
601 | |
602 | // This class wraps the atomic memcpy/memmove intrinsics |
603 | // i.e. llvm.element.unordered.atomic.memcpy/memmove |
604 | class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> { |
605 | public: |
606 | static bool classof(const IntrinsicInst *I) { |
607 | switch (I->getIntrinsicID()) { |
608 | case Intrinsic::memcpy_element_unordered_atomic: |
609 | case Intrinsic::memmove_element_unordered_atomic: |
610 | return true; |
611 | default: |
612 | return false; |
613 | } |
614 | } |
615 | static bool classof(const Value *V) { |
616 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
617 | } |
618 | }; |
619 | |
620 | /// This class represents the atomic memcpy intrinsic |
621 | /// i.e. llvm.element.unordered.atomic.memcpy |
622 | class AtomicMemCpyInst : public AtomicMemTransferInst { |
623 | public: |
624 | static bool classof(const IntrinsicInst *I) { |
625 | return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic; |
626 | } |
627 | static bool classof(const Value *V) { |
628 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
629 | } |
630 | }; |
631 | |
632 | /// This class represents the atomic memmove intrinsic |
633 | /// i.e. llvm.element.unordered.atomic.memmove |
634 | class AtomicMemMoveInst : public AtomicMemTransferInst { |
635 | public: |
636 | static bool classof(const IntrinsicInst *I) { |
637 | return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic; |
638 | } |
639 | static bool classof(const Value *V) { |
640 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
641 | } |
642 | }; |
643 | |
644 | /// This is the common base class for memset/memcpy/memmove. |
645 | class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> { |
646 | private: |
647 | enum { ARG_VOLATILE = 3 }; |
648 | |
649 | public: |
650 | ConstantInt *getVolatileCst() const { |
651 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(ARG_VOLATILE))); |
652 | } |
653 | |
654 | bool isVolatile() const { return !getVolatileCst()->isZero(); } |
655 | |
656 | void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); } |
657 | |
658 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
659 | static bool classof(const IntrinsicInst *I) { |
660 | switch (I->getIntrinsicID()) { |
661 | case Intrinsic::memcpy: |
662 | case Intrinsic::memmove: |
663 | case Intrinsic::memset: |
664 | case Intrinsic::memcpy_inline: |
665 | return true; |
666 | default: |
667 | return false; |
668 | } |
669 | } |
670 | static bool classof(const Value *V) { |
671 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
672 | } |
673 | }; |
674 | |
675 | /// This class wraps the llvm.memset intrinsic. |
676 | class MemSetInst : public MemSetBase<MemIntrinsic> { |
677 | public: |
678 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
679 | static bool classof(const IntrinsicInst *I) { |
680 | return I->getIntrinsicID() == Intrinsic::memset; |
681 | } |
682 | static bool classof(const Value *V) { |
683 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
684 | } |
685 | }; |
686 | |
687 | /// This class wraps the llvm.memcpy/memmove intrinsics. |
688 | class MemTransferInst : public MemTransferBase<MemIntrinsic> { |
689 | public: |
690 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
691 | static bool classof(const IntrinsicInst *I) { |
692 | switch (I->getIntrinsicID()) { |
693 | case Intrinsic::memcpy: |
694 | case Intrinsic::memmove: |
695 | case Intrinsic::memcpy_inline: |
696 | return true; |
697 | default: |
698 | return false; |
699 | } |
700 | } |
701 | static bool classof(const Value *V) { |
702 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
703 | } |
704 | }; |
705 | |
706 | /// This class wraps the llvm.memcpy intrinsic. |
707 | class MemCpyInst : public MemTransferInst { |
708 | public: |
709 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
710 | static bool classof(const IntrinsicInst *I) { |
711 | return I->getIntrinsicID() == Intrinsic::memcpy; |
712 | } |
713 | static bool classof(const Value *V) { |
714 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
715 | } |
716 | }; |
717 | |
718 | /// This class wraps the llvm.memmove intrinsic. |
719 | class MemMoveInst : public MemTransferInst { |
720 | public: |
721 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
722 | static bool classof(const IntrinsicInst *I) { |
723 | return I->getIntrinsicID() == Intrinsic::memmove; |
724 | } |
725 | static bool classof(const Value *V) { |
726 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
727 | } |
728 | }; |
729 | |
730 | /// This class wraps the llvm.memcpy.inline intrinsic. |
731 | class MemCpyInlineInst : public MemTransferInst { |
732 | public: |
733 | ConstantInt *getLength() const { |
734 | return cast<ConstantInt>(MemTransferInst::getLength()); |
735 | } |
736 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
737 | static bool classof(const IntrinsicInst *I) { |
738 | return I->getIntrinsicID() == Intrinsic::memcpy_inline; |
739 | } |
740 | static bool classof(const Value *V) { |
741 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
742 | } |
743 | }; |
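A minimal dispatch sketch over the non-atomic hierarchy above (illustrative; assumes an Instruction *I inside a void function):

    if (auto *MI = dyn_cast<MemIntrinsic>(I)) {
      if (MI->isVolatile())
        return; // transformations typically refuse to touch volatile ops
      if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
        Value *ByteVal = MSI->getValue(); // the i8 fill value
        (void)ByteVal;
      } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
        Value *Src = MTI->getSource();    // stripped of pointer casts
        Value *Dst = MTI->getDest();
        bool KeepInline = isa<MemCpyInlineInst>(MTI); // must not become a call
        (void)Src; (void)Dst; (void)KeepInline;
      }
    }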
744 | |
745 | // The common base class for any memset/memmove/memcpy intrinsics,
746 | // whether atomic or non-atomic, i.e.
747 | // llvm.element.unordered.atomic.memset/memcpy/memmove
748 | // and llvm.memset/memcpy/memmove.
749 | class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> { |
750 | public: |
751 | bool isVolatile() const { |
752 | // Only the non-atomic intrinsics can be volatile |
753 | if (auto *MI = dyn_cast<MemIntrinsic>(this)) |
754 | return MI->isVolatile(); |
755 | return false; |
756 | } |
757 | |
758 | static bool classof(const IntrinsicInst *I) { |
759 | switch (I->getIntrinsicID()) { |
760 | case Intrinsic::memcpy: |
761 | case Intrinsic::memcpy_inline: |
762 | case Intrinsic::memmove: |
763 | case Intrinsic::memset: |
764 | case Intrinsic::memcpy_element_unordered_atomic: |
765 | case Intrinsic::memmove_element_unordered_atomic: |
766 | case Intrinsic::memset_element_unordered_atomic: |
767 | return true; |
768 | default: |
769 | return false; |
770 | } |
771 | } |
772 | static bool classof(const Value *V) { |
773 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
774 | } |
775 | }; |
776 | |
777 | /// This class represents any memset intrinsic, i.e.
778 | /// llvm.element.unordered.atomic.memset
779 | /// and llvm.memset.
780 | class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> { |
781 | public: |
782 | static bool classof(const IntrinsicInst *I) { |
783 | switch (I->getIntrinsicID()) { |
784 | case Intrinsic::memset: |
785 | case Intrinsic::memset_element_unordered_atomic: |
786 | return true; |
787 | default: |
788 | return false; |
789 | } |
790 | } |
791 | static bool classof(const Value *V) { |
792 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
793 | } |
794 | }; |
795 | |
796 | // This class wraps any memcpy/memmove intrinsics |
797 | // i.e. llvm.element.unordered.atomic.memcpy/memmove |
798 | // and llvm.memcpy/memmove |
799 | class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> { |
800 | public: |
801 | static bool classof(const IntrinsicInst *I) { |
802 | switch (I->getIntrinsicID()) { |
803 | case Intrinsic::memcpy: |
804 | case Intrinsic::memcpy_inline: |
805 | case Intrinsic::memmove: |
806 | case Intrinsic::memcpy_element_unordered_atomic: |
807 | case Intrinsic::memmove_element_unordered_atomic: |
808 | return true; |
809 | default: |
810 | return false; |
811 | } |
812 | } |
813 | static bool classof(const Value *V) { |
814 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
815 | } |
816 | }; |
817 | |
818 | /// This class represents any memcpy intrinsic |
819 | /// i.e. llvm.element.unordered.atomic.memcpy |
820 | /// and llvm.memcpy |
821 | class AnyMemCpyInst : public AnyMemTransferInst { |
822 | public: |
823 | static bool classof(const IntrinsicInst *I) { |
824 | switch (I->getIntrinsicID()) { |
825 | case Intrinsic::memcpy: |
826 | case Intrinsic::memcpy_inline: |
827 | case Intrinsic::memcpy_element_unordered_atomic: |
828 | return true; |
829 | default: |
830 | return false; |
831 | } |
832 | } |
833 | static bool classof(const Value *V) { |
834 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
835 | } |
836 | }; |
837 | |
838 | /// This class represents any memmove intrinsic |
839 | /// i.e. llvm.element.unordered.atomic.memmove |
840 | /// and llvm.memmove |
841 | class AnyMemMoveInst : public AnyMemTransferInst { |
842 | public: |
843 | static bool classof(const IntrinsicInst *I) { |
844 | switch (I->getIntrinsicID()) { |
845 | case Intrinsic::memmove: |
846 | case Intrinsic::memmove_element_unordered_atomic: |
847 | return true; |
848 | default: |
849 | return false; |
850 | } |
851 | } |
852 | static bool classof(const Value *V) { |
853 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
854 | } |
855 | }; |
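A sketch of why the Any* layer exists (illustrative; processTransfer is a hypothetical helper): analyses that only care about the memory footprint can treat atomic and non-atomic transfers uniformly:

    if (auto *AMT = dyn_cast<AnyMemTransferInst>(I)) {
      // Matches llvm.memcpy/memmove and their element-atomic variants alike.
      if (!AMT->isVolatile()) // the atomic variants report non-volatile here
        processTransfer(AMT->getDest(), AMT->getSource(), AMT->getLength());
    }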
856 | |
857 | /// This represents the llvm.va_start intrinsic. |
858 | class VAStartInst : public IntrinsicInst { |
859 | public: |
860 | static bool classof(const IntrinsicInst *I) { |
861 | return I->getIntrinsicID() == Intrinsic::vastart; |
862 | } |
863 | static bool classof(const Value *V) { |
864 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
865 | } |
866 | |
867 | Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); } |
868 | }; |
869 | |
870 | /// This represents the llvm.va_end intrinsic. |
871 | class VAEndInst : public IntrinsicInst { |
872 | public: |
873 | static bool classof(const IntrinsicInst *I) { |
874 | return I->getIntrinsicID() == Intrinsic::vaend; |
875 | } |
876 | static bool classof(const Value *V) { |
877 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
878 | } |
879 | |
880 | Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); } |
881 | }; |
882 | |
883 | /// This represents the llvm.va_copy intrinsic. |
884 | class VACopyInst : public IntrinsicInst { |
885 | public: |
886 | static bool classof(const IntrinsicInst *I) { |
887 | return I->getIntrinsicID() == Intrinsic::vacopy; |
888 | } |
889 | static bool classof(const Value *V) { |
890 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
891 | } |
892 | |
893 | Value *getDest() const { return const_cast<Value *>(getArgOperand(0)); } |
894 | Value *getSrc() const { return const_cast<Value *>(getArgOperand(1)); } |
895 | }; |
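For illustration, the accessors above let a pass treat the va_list operand uniformly (assumes an Instruction *I):

    Value *ArgList = nullptr;
    if (auto *VAS = dyn_cast<VAStartInst>(I))
      ArgList = VAS->getArgList();
    else if (auto *VAC = dyn_cast<VACopyInst>(I))
      ArgList = VAC->getDest(); // va_copy initializes its destination list
    (void)ArgList;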
896 | |
897 | /// This represents the llvm.instrprof_increment intrinsic. |
898 | class InstrProfIncrementInst : public IntrinsicInst { |
899 | public: |
900 | static bool classof(const IntrinsicInst *I) { |
901 | return I->getIntrinsicID() == Intrinsic::instrprof_increment; |
902 | } |
903 | static bool classof(const Value *V) { |
904 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
905 | } |
906 | |
907 | GlobalVariable *getName() const { |
908 | return cast<GlobalVariable>( |
909 | const_cast<Value *>(getArgOperand(0))->stripPointerCasts()); |
910 | } |
911 | |
912 | ConstantInt *getHash() const { |
913 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1))); |
914 | } |
915 | |
916 | ConstantInt *getNumCounters() const { |
917 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2))); |
918 | } |
919 | |
920 | ConstantInt *getIndex() const { |
921 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3))); |
922 | } |
923 | |
924 | Value *getStep() const; |
925 | }; |
926 | |
927 | class InstrProfIncrementInstStep : public InstrProfIncrementInst { |
928 | public: |
929 | static bool classof(const IntrinsicInst *I) { |
930 | return I->getIntrinsicID() == Intrinsic::instrprof_increment_step; |
931 | } |
932 | static bool classof(const Value *V) { |
933 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
934 | } |
935 | }; |
936 | |
937 | /// This represents the llvm.instrprof_value_profile intrinsic. |
938 | class InstrProfValueProfileInst : public IntrinsicInst { |
939 | public: |
940 | static bool classof(const IntrinsicInst *I) { |
941 | return I->getIntrinsicID() == Intrinsic::instrprof_value_profile; |
942 | } |
943 | static bool classof(const Value *V) { |
944 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
945 | } |
946 | |
947 | GlobalVariable *getName() const { |
948 | return cast<GlobalVariable>( |
949 | const_cast<Value *>(getArgOperand(0))->stripPointerCasts()); |
950 | } |
951 | |
952 | ConstantInt *getHash() const { |
953 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1))); |
954 | } |
955 | |
956 | Value *getTargetValue() const { |
957 | return cast<Value>(const_cast<Value *>(getArgOperand(2))); |
958 | } |
959 | |
960 | ConstantInt *getValueKind() const { |
961 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3))); |
962 | } |
963 | |
964 | // Returns the value site index. |
965 | ConstantInt *getIndex() const { |
966 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4))); |
967 | } |
968 | }; |
969 | |
970 | class PseudoProbeInst : public IntrinsicInst { |
971 | public: |
972 | static bool classof(const IntrinsicInst *I) { |
973 | return I->getIntrinsicID() == Intrinsic::pseudoprobe; |
974 | } |
975 | |
976 | static bool classof(const Value *V) { |
977 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
978 | } |
979 | |
980 | ConstantInt *getFuncGuid() const { |
981 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(0))); |
982 | } |
983 | |
984 | ConstantInt *getAttributes() const { |
985 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2))); |
986 | } |
987 | |
988 | ConstantInt *getIndex() const { |
989 | return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1))); |
990 | } |
991 | }; |
992 | |
993 | class NoAliasScopeDeclInst : public IntrinsicInst { |
994 | public: |
995 | static bool classof(const IntrinsicInst *I) { |
996 | return I->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl; |
997 | } |
998 | |
999 | static bool classof(const Value *V) { |
1000 | return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); |
1001 | } |
1002 | |
1003 | MDNode *getScopeList() const { |
1004 | auto *MV = |
1005 | cast<MetadataAsValue>(getOperand(Intrinsic::NoAliasScopeDeclScopeArg)); |
1006 | return cast<MDNode>(MV->getMetadata()); |
1007 | } |
1008 | |
1009 | void setScopeList(MDNode *ScopeList) { |
1010 | setOperand(Intrinsic::NoAliasScopeDeclScopeArg, |
1011 | MetadataAsValue::get(getContext(), ScopeList)); |
1012 | } |
1013 | }; |
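A minimal round-trip sketch for the scope-list accessors above (illustrative; Decl is an assumed NoAliasScopeDeclInst *):

    MDNode *Scopes = Decl->getScopeList(); // the declared !alias.scope list
    Decl->setScopeList(Scopes);            // rewrapped via MetadataAsValue::get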
1014 | |
1015 | } // end namespace llvm |
1016 | |
1017 | #endif // LLVM_IR_INTRINSICINST_H |
1 | //===-- llvm/Constants.h - Constant class subclass definitions --*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// @file |
10 | /// This file contains the declarations for the subclasses of Constant, |
11 | /// which represent the different flavors of constant values that live in LLVM. |
12 | /// Note that Constants are immutable (once created they never change) and are |
13 | /// fully shared by structural equivalence. This means that two structurally |
14 | /// equivalent constants will always have the same address. Constants are |
15 | /// created on demand as needed and never deleted: thus clients don't have to |
16 | /// worry about the lifetime of the objects. |
17 | // |
18 | //===----------------------------------------------------------------------===// |
19 | |
20 | #ifndef LLVM_IR_CONSTANTS_H |
21 | #define LLVM_IR_CONSTANTS_H |
22 | |
23 | #include "llvm/ADT/APFloat.h" |
24 | #include "llvm/ADT/APInt.h" |
25 | #include "llvm/ADT/ArrayRef.h" |
26 | #include "llvm/ADT/None.h" |
27 | #include "llvm/ADT/Optional.h" |
28 | #include "llvm/ADT/STLExtras.h" |
29 | #include "llvm/ADT/StringRef.h" |
30 | #include "llvm/IR/Constant.h" |
31 | #include "llvm/IR/DerivedTypes.h" |
32 | #include "llvm/IR/OperandTraits.h" |
33 | #include "llvm/IR/User.h" |
34 | #include "llvm/IR/Value.h" |
35 | #include "llvm/Support/Casting.h" |
36 | #include "llvm/Support/Compiler.h" |
37 | #include "llvm/Support/ErrorHandling.h" |
38 | #include <cassert> |
39 | #include <cstddef> |
40 | #include <cstdint> |
41 | |
42 | namespace llvm { |
43 | |
44 | template <class ConstantClass> struct ConstantAggrKeyType; |
45 | |
46 | /// Base class for constants with no operands. |
47 | /// |
48 | /// These constants have no operands; they represent their data directly. |
49 | /// Since they can be in use by unrelated modules (and are never based on |
50 | /// GlobalValues), it never makes sense to RAUW them. |
51 | class ConstantData : public Constant { |
52 | friend class Constant; |
53 | |
54 | Value *handleOperandChangeImpl(Value *From, Value *To) { |
55 | llvm_unreachable("Constant data does not have operands!");
56 | } |
57 | |
58 | protected: |
59 | explicit ConstantData(Type *Ty, ValueTy VT) : Constant(Ty, VT, nullptr, 0) {} |
60 | |
61 | void *operator new(size_t s) { return User::operator new(s, 0); } |
62 | |
63 | public: |
64 | ConstantData(const ConstantData &) = delete; |
65 | |
66 | /// Methods to support type inquiry through isa, cast, and dyn_cast. |
67 | static bool classof(const Value *V) { |
68 | return V->getValueID() >= ConstantDataFirstVal && |
69 | V->getValueID() <= ConstantDataLastVal; |
70 | } |
71 | }; |
72 | |
73 | //===----------------------------------------------------------------------===// |
74 | /// This is the shared class of boolean and integer constants. This class |
75 | /// represents both boolean and integral constants. |
76 | /// Class for constant integers. |
77 | class ConstantInt final : public ConstantData { |
78 | friend class Constant; |
79 | |
80 | APInt Val; |
81 | |
82 | ConstantInt(IntegerType *Ty, const APInt& V); |
83 | |
84 | void destroyConstantImpl(); |
85 | |
86 | public: |
87 | ConstantInt(const ConstantInt &) = delete; |
88 | |
89 | static ConstantInt *getTrue(LLVMContext &Context); |
90 | static ConstantInt *getFalse(LLVMContext &Context); |
91 | static ConstantInt *getBool(LLVMContext &Context, bool V); |
92 | static Constant *getTrue(Type *Ty); |
93 | static Constant *getFalse(Type *Ty); |
94 | static Constant *getBool(Type *Ty, bool V); |
95 | |
96 | /// If Ty is a vector type, return a Constant with a splat of the given |
97 | /// value. Otherwise return a ConstantInt for the given value. |
98 | static Constant *get(Type *Ty, uint64_t V, bool isSigned = false); |
99 | |
100 | /// Return a ConstantInt with the specified integer value for the specified |
101 | /// type. If the type is wider than 64 bits, the value will be zero-extended |
102 | /// to fit the type, unless isSigned is true, in which case the value will |
103 | /// be interpreted as a 64-bit signed integer and sign-extended to fit |
104 | /// the type. |
105 | /// Get a ConstantInt for a specific value. |
106 | static ConstantInt *get(IntegerType *Ty, uint64_t V, |
107 | bool isSigned = false); |
108 | |
109 | /// Return a ConstantInt with the specified value for the specified type. The |
110 | /// value V will be canonicalized to an unsigned APInt. Accessing it with
111 | /// either getSExtValue() or getZExtValue() will yield a correctly sized and |
112 | /// signed value for the type Ty. |
113 | /// Get a ConstantInt for a specific signed value. |
114 | static ConstantInt *getSigned(IntegerType *Ty, int64_t V); |
115 | static Constant *getSigned(Type *Ty, int64_t V); |
116 | |
117 | /// Return a ConstantInt with the specified value and an implied Type. The |
118 | /// type is the integer type that corresponds to the bit width of the value. |
119 | static ConstantInt *get(LLVMContext &Context, const APInt &V); |
120 | |
121 | /// Return a ConstantInt constructed from the string strStart with the given |
122 | /// radix. |
123 | static ConstantInt *get(IntegerType *Ty, StringRef Str, |
124 | uint8_t radix); |
125 | |
126 | /// If Ty is a vector type, return a Constant with a splat of the given |
127 | /// value. Otherwise return a ConstantInt for the given value. |
128 | static Constant *get(Type* Ty, const APInt& V); |
129 | |
130 | /// Return the constant as an APInt value reference. This allows clients to |
131 | /// obtain a full-precision copy of the value. |
132 | /// Return the constant's value. |
133 | inline const APInt &getValue() const { |
134 | return Val; |
135 | } |
136 | |
137 | /// getBitWidth - Return the bitwidth of this constant. |
138 | unsigned getBitWidth() const { return Val.getBitWidth(); } |
139 | |
140 | /// Return the constant as a 64-bit unsigned integer value after it |
141 | /// has been zero extended as appropriate for the type of this constant. Note |
142 | /// that this method can assert if the value does not fit in 64 bits. |
143 | /// Return the zero extended value. |
144 | inline uint64_t getZExtValue() const { |
145 | return Val.getZExtValue(); |
146 | } |
147 | |
148 | /// Return the constant as a 64-bit integer value after it has been sign |
149 | /// extended as appropriate for the type of this constant. Note that |
150 | /// this method can assert if the value does not fit in 64 bits. |
151 | /// Return the sign extended value. |
152 | inline int64_t getSExtValue() const { |
153 | return Val.getSExtValue(); |
154 | } |
155 | |
156 | /// Return the constant as an llvm::MaybeAlign. |
157 | /// Note that this method can assert if the value does not fit in 64 bits or |
158 | /// is not a power of two. |
159 | inline MaybeAlign getMaybeAlignValue() const { |
160 | return MaybeAlign(getZExtValue()); |
161 | } |
162 | |
163 | /// Return the constant as an llvm::Align, interpreting `0` as `Align(1)`. |
164 | /// Note that this method can assert if the value does not fit in 64 bits or |
165 | /// is not a power of two. |
166 | inline Align getAlignValue() const { |
167 | return getMaybeAlignValue().valueOrOne(); |
168 | } |
169 | |
170 | /// A helper method that can be used to determine if the constant contained |
171 | /// within is equal to a constant. This only works for very small values, |
172 | /// because this is all that can be represented with all types. |
173 | /// Determine if this constant's value is the same as an unsigned char.
174 | bool equalsInt(uint64_t V) const { |
175 | return Val == V; |
176 | } |
177 | |
178 | /// getType - Specialize the getType() method to always return an IntegerType, |
179 | /// which reduces the amount of casting needed in parts of the compiler. |
180 | /// |
181 | inline IntegerType *getType() const { |
182 | return cast<IntegerType>(Value::getType()); |
183 | } |
184 | |
185 | /// This static method returns true if the type Ty is big enough to |
186 | /// represent the value V. This can be used to avoid having the get method |
187 | /// assert when V is larger than Ty can represent. Note that there are two |
188 | /// versions of this method, one for unsigned and one for signed integers. |
189 | /// Although ConstantInt canonicalizes everything to an unsigned integer, |
190 | /// the signed version avoids callers having to convert a signed quantity |
191 | /// to the appropriate unsigned type before calling the method. |
192 | /// @returns true if V is a valid value for type Ty |
193 | /// Determine if the value is in range for the given type. |
194 | static bool isValueValidForType(Type *Ty, uint64_t V); |
195 | static bool isValueValidForType(Type *Ty, int64_t V); |
196 | |
197 | bool isNegative() const { return Val.isNegative(); } |
198 | |
199 | /// This is just a convenience method to make client code smaller for a |
200 | /// common code. It also correctly performs the comparison without the |
201 | /// potential for an assertion from getZExtValue(). |
202 | bool isZero() const { |
203 | return Val.isNullValue(); |
204 | } |
205 | |
206 | /// This is just a convenience method to make client code smaller for a |
207 | /// common case. It also correctly performs the comparison without the |
208 | /// potential for an assertion from getZExtValue(). |
209 | /// Determine if the value is one. |
210 | bool isOne() const { |
211 | return Val.isOneValue(); |
212 | } |
213 | |
214 | /// This function will return true iff every bit in this constant is set |
215 | /// to true. |
216 | /// @returns true iff this constant's bits are all set to true. |
217 | /// Determine if the value is all ones. |
218 | bool isMinusOne() const { |
219 | return Val.isAllOnesValue(); |
220 | } |
221 | |
222 | /// This function will return true iff this constant represents the largest |
223 | /// value that may be represented by the constant's type. |
224 | /// @returns true iff this is the largest value that may be represented |
225 | /// by this type. |
226 | /// Determine if the value is maximal. |
227 | bool isMaxValue(bool isSigned) const { |
228 | if (isSigned) |
229 | return Val.isMaxSignedValue(); |
230 | else |
231 | return Val.isMaxValue(); |
232 | } |
233 | |
234 | /// This function will return true iff this constant represents the smallest |
235 | /// value that may be represented by this constant's type. |
236 | /// @returns true if this is the smallest value that may be represented by |
237 | /// this type. |
238 | /// Determine if the value is minimal. |
239 | bool isMinValue(bool isSigned) const { |
240 | if (isSigned) |
241 | return Val.isMinSignedValue(); |
242 | else |
243 | return Val.isMinValue(); |
244 | } |
245 | |
246 | /// This function will return true iff this constant represents a value whose
247 | /// active bits occupy more than 64 bits, or a value greater than the given
248 | /// uint64_t value.
249 | /// @returns true iff this constant is greater or equal to the given number. |
250 | /// Determine if the value is greater or equal to the given number. |
251 | bool uge(uint64_t Num) const { |
252 | return Val.uge(Num); |
253 | } |
254 | |
255 | /// getLimitedValue - If the value is smaller than the specified limit, |
256 | /// return it, otherwise return the limit value. This causes the value |
257 | /// to saturate to the limit. |
258 | /// @returns the min of the value of the constant and the specified value |
259 | /// Get the constant's value with a saturation limit |
260 | uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const { |
261 | return Val.getLimitedValue(Limit); |
262 | } |
263 | |
264 | /// Methods to support type inquiry through isa, cast, and dyn_cast. |
265 | static bool classof(const Value *V) { |
266 | return V->getValueID() == ConstantIntVal; |
267 | } |
268 | }; |
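A short sketch of the factory and query API above (illustrative; Ctx is an assumed LLVMContext):

    IntegerType *I32 = Type::getInt32Ty(Ctx);
    ConstantInt *C = ConstantInt::get(I32, -1, /*isSigned=*/true);
    // The value is stored as a canonicalized APInt; both views stay coherent:
    assert(C->getSExtValue() == -1 && C->getZExtValue() == 0xFFFFFFFFu);
    assert(C->isMinusOne() && !C->isZero());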
269 | |
270 | //===----------------------------------------------------------------------===// |
271 | /// ConstantFP - Floating Point Values [float, double] |
272 | /// |
273 | class ConstantFP final : public ConstantData { |
274 | friend class Constant; |
275 | |
276 | APFloat Val; |
277 | |
278 | ConstantFP(Type *Ty, const APFloat& V); |
279 | |
280 | void destroyConstantImpl(); |
281 | |
282 | public: |
283 | ConstantFP(const ConstantFP &) = delete; |
284 | |
285 | /// Floating point negation must be implemented with f(x) = -0.0 - x. This |
286 | /// method returns the negative zero constant for floating point or vector |
287 | /// floating point types; for all other types, it returns the null value. |
288 | static Constant *getZeroValueForNegation(Type *Ty); |
289 | |
290 | /// This returns a ConstantFP, or a vector containing a splat of a ConstantFP, |
291 | /// for the specified value in the specified type. This should only be used |
292 | /// for simple constant values like 2.0/1.0 etc, that are known-valid both as |
293 | /// host double and as the target format. |
294 | static Constant *get(Type* Ty, double V); |
295 | |
296 | /// If Ty is a vector type, return a Constant with a splat of the given |
297 | /// value. Otherwise return a ConstantFP for the given value. |
298 | static Constant *get(Type *Ty, const APFloat &V); |
299 | |
300 | static Constant *get(Type* Ty, StringRef Str); |
301 | static ConstantFP *get(LLVMContext &Context, const APFloat &V); |
302 | static Constant *getNaN(Type *Ty, bool Negative = false, uint64_t Payload = 0); |
303 | static Constant *getQNaN(Type *Ty, bool Negative = false, |
304 | APInt *Payload = nullptr); |
305 | static Constant *getSNaN(Type *Ty, bool Negative = false, |
306 | APInt *Payload = nullptr); |
307 | static Constant *getNegativeZero(Type *Ty); |
308 | static Constant *getInfinity(Type *Ty, bool Negative = false); |
309 | |
310 | /// Return true if Ty is big enough to represent V. |
311 | static bool isValueValidForType(Type *Ty, const APFloat &V); |
312 | inline const APFloat &getValueAPF() const { return Val; } |
313 | inline const APFloat &getValue() const { return Val; } |
314 | |
315 | /// Return true if the value is positive or negative zero. |
316 | bool isZero() const { return Val.isZero(); } |
317 | |
318 | /// Return true if the sign bit is set. |
319 | bool isNegative() const { return Val.isNegative(); } |
320 | |
321 | /// Return true if the value is infinity.
322 | bool isInfinity() const { return Val.isInfinity(); } |
323 | |
324 | /// Return true if the value is a NaN. |
325 | bool isNaN() const { return Val.isNaN(); } |
326 | |
327 | /// We don't rely on operator== working on double values, as it returns true |
328 | /// for things that are clearly not equal, like -0.0 and 0.0. |
329 | /// As such, this method can be used to do an exact bit-for-bit comparison of |
330 | /// two floating point values. The version with a double operand is retained |
331 | /// because it's so convenient to write isExactlyValue(2.0), but please use |
332 | /// it only for simple constants. |
333 | bool isExactlyValue(const APFloat &V) const; |
334 | |
335 | bool isExactlyValue(double V) const { |
336 | bool ignored; |
337 | APFloat FV(V); |
338 | FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored); |
339 | return isExactlyValue(FV); |
340 | } |
341 | |
342 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
343 | static bool classof(const Value *V) { |
344 | return V->getValueID() == ConstantFPVal; |
345 | } |
346 | }; |
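A corresponding sketch for ConstantFP (illustrative; Ctx as above):

    Constant *Two = ConstantFP::get(Type::getDoubleTy(Ctx), 2.0);
    if (auto *CF = dyn_cast<ConstantFP>(Two)) // get() may splat for vector types
      assert(CF->isExactlyValue(2.0) && !CF->isNaN() && !CF->isNegative());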
347 | |
348 | //===----------------------------------------------------------------------===// |
349 | /// All zero aggregate value |
350 | /// |
351 | class ConstantAggregateZero final : public ConstantData { |
352 | friend class Constant; |
353 | |
354 | explicit ConstantAggregateZero(Type *Ty) |
355 | : ConstantData(Ty, ConstantAggregateZeroVal) {} |
356 | |
357 | void destroyConstantImpl(); |
358 | |
359 | public: |
360 | ConstantAggregateZero(const ConstantAggregateZero &) = delete; |
361 | |
362 | static ConstantAggregateZero *get(Type *Ty); |
363 | |
364 | /// If this CAZ has array or vector type, return a zero with the right element |
365 | /// type. |
366 | Constant *getSequentialElement() const; |
367 | |
368 | /// If this CAZ has struct type, return a zero with the right element type for |
369 | /// the specified element. |
370 | Constant *getStructElement(unsigned Elt) const; |
371 | |
372 | /// Return a zero of the right value for the specified GEP index if we can, |
373 | /// otherwise return null (e.g. if C is a ConstantExpr). |
374 | Constant *getElementValue(Constant *C) const; |
375 | |
376 | /// Return a zero of the right value for the specified GEP index. |
377 | Constant *getElementValue(unsigned Idx) const; |
378 | |
379 | /// Return the number of elements in the array, vector, or struct. |
380 | unsigned getNumElements() const; |
381 | |
382 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
383 | /// |
384 | static bool classof(const Value *V) { |
385 | return V->getValueID() == ConstantAggregateZeroVal; |
386 | } |
387 | }; |
388 | |
389 | /// Base class for aggregate constants (with operands). |
390 | /// |
391 | /// These constants are aggregates of other constants, which are stored as |
392 | /// operands. |
393 | /// |
394 | /// Subclasses are \a ConstantStruct, \a ConstantArray, and \a |
395 | /// ConstantVector. |
396 | /// |
397 | /// \note Some subclasses of \a ConstantData are semantically aggregates -- |
398 | /// such as \a ConstantDataArray -- but are not subclasses of this because they |
399 | /// use operands. |
400 | class ConstantAggregate : public Constant { |
401 | protected: |
402 | ConstantAggregate(Type *T, ValueTy VT, ArrayRef<Constant *> V); |
403 | |
404 | public: |
405 | /// Transparently provide more efficient getOperand methods. |
406 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
407 | |
408 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
409 | static bool classof(const Value *V) { |
410 | return V->getValueID() >= ConstantAggregateFirstVal && |
411 | V->getValueID() <= ConstantAggregateLastVal; |
412 | } |
413 | }; |
414 | |
415 | template <> |
416 | struct OperandTraits<ConstantAggregate> |
417 | : public VariadicOperandTraits<ConstantAggregate> {}; |
418 | |
419 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantAggregate, Constant)
420 | |
421 | //===----------------------------------------------------------------------===// |
422 | /// ConstantArray - Constant Array Declarations |
423 | /// |
424 | class ConstantArray final : public ConstantAggregate { |
425 | friend struct ConstantAggrKeyType<ConstantArray>; |
426 | friend class Constant; |
427 | |
428 | ConstantArray(ArrayType *T, ArrayRef<Constant *> Val); |
429 | |
430 | void destroyConstantImpl(); |
431 | Value *handleOperandChangeImpl(Value *From, Value *To); |
432 | |
433 | public: |
434 | // ConstantArray accessors |
435 | static Constant *get(ArrayType *T, ArrayRef<Constant*> V); |
436 | |
437 | private: |
438 | static Constant *getImpl(ArrayType *T, ArrayRef<Constant *> V); |
439 | |
440 | public: |
441 | /// Specialize the getType() method to always return an ArrayType, |
442 | /// which reduces the amount of casting needed in parts of the compiler. |
443 | inline ArrayType *getType() const { |
444 | return cast<ArrayType>(Value::getType()); |
445 | } |
446 | |
447 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
448 | static bool classof(const Value *V) { |
449 | return V->getValueID() == ConstantArrayVal; |
450 | } |
451 | }; |
452 | |
453 | //===----------------------------------------------------------------------===// |
454 | // Constant Struct Declarations |
455 | // |
456 | class ConstantStruct final : public ConstantAggregate { |
457 | friend struct ConstantAggrKeyType<ConstantStruct>; |
458 | friend class Constant; |
459 | |
460 | ConstantStruct(StructType *T, ArrayRef<Constant *> Val); |
461 | |
462 | void destroyConstantImpl(); |
463 | Value *handleOperandChangeImpl(Value *From, Value *To); |
464 | |
465 | public: |
466 | // ConstantStruct accessors |
467 | static Constant *get(StructType *T, ArrayRef<Constant*> V); |
468 | |
469 | template <typename... Csts> |
470 | static std::enable_if_t<are_base_of<Constant, Csts...>::value, Constant *> |
471 | get(StructType *T, Csts *... Vs) { |
472 | SmallVector<Constant *, 8> Values({Vs...}); |
473 | return get(T, Values); |
474 | } |
475 | |
476 | /// Return an anonymous struct that has the specified elements. |
477 | /// If the struct is possibly empty, then you must specify a context. |
478 | static Constant *getAnon(ArrayRef<Constant*> V, bool Packed = false) { |
479 | return get(getTypeForElements(V, Packed), V); |
480 | } |
481 | static Constant *getAnon(LLVMContext &Ctx, |
482 | ArrayRef<Constant*> V, bool Packed = false) { |
483 | return get(getTypeForElements(Ctx, V, Packed), V); |
484 | } |
485 | |
486 | /// Return an anonymous struct type to use for a constant with the specified |
487 | /// set of elements. The list must not be empty. |
488 | static StructType *getTypeForElements(ArrayRef<Constant*> V, |
489 | bool Packed = false); |
490 | /// This version of the method allows an empty list. |
491 | static StructType *getTypeForElements(LLVMContext &Ctx, |
492 | ArrayRef<Constant*> V, |
493 | bool Packed = false); |
494 | |
495 | /// Specialization - reduce amount of casting. |
496 | inline StructType *getType() const { |
497 | return cast<StructType>(Value::getType()); |
498 | } |
499 | |
500 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
501 | static bool classof(const Value *V) { |
502 | return V->getValueID() == ConstantStructVal; |
503 | } |
504 | }; |
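A small sketch of getAnon (illustrative; Ctx as above): the struct type is derived from the element types, so it never has to be spelled out:

    Constant *Fields[] = {ConstantInt::get(Type::getInt32Ty(Ctx), 1),
                          ConstantInt::get(Type::getInt32Ty(Ctx), 2)};
    Constant *S = ConstantStruct::getAnon(Fields); // anonymous { i32, i32 }
    (void)S;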
505 | |
506 | //===----------------------------------------------------------------------===// |
507 | /// Constant Vector Declarations |
508 | /// |
509 | class ConstantVector final : public ConstantAggregate { |
510 | friend struct ConstantAggrKeyType<ConstantVector>; |
511 | friend class Constant; |
512 | |
513 | ConstantVector(VectorType *T, ArrayRef<Constant *> Val); |
514 | |
515 | void destroyConstantImpl(); |
516 | Value *handleOperandChangeImpl(Value *From, Value *To); |
517 | |
518 | public: |
519 | // ConstantVector accessors |
520 | static Constant *get(ArrayRef<Constant*> V); |
521 | |
522 | private: |
523 | static Constant *getImpl(ArrayRef<Constant *> V); |
524 | |
525 | public: |
526 | /// Return a ConstantVector with the specified constant in each element. |
527 | /// Note that this might not return an instance of ConstantVector |
528 | static Constant *getSplat(ElementCount EC, Constant *Elt); |
529 | |
530 | /// Specialize the getType() method to always return a FixedVectorType, |
531 | /// which reduces the amount of casting needed in parts of the compiler. |
532 | inline FixedVectorType *getType() const { |
533 | return cast<FixedVectorType>(Value::getType()); |
534 | } |
535 | |
536 | /// If all elements of the vector constant have the same value, return that |
537 | /// value. Otherwise, return nullptr. Ignore undefined elements by setting |
538 | /// AllowUndefs to true. |
539 | Constant *getSplatValue(bool AllowUndefs = false) const; |
540 | |
541 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
542 | static bool classof(const Value *V) { |
543 | return V->getValueID() == ConstantVectorVal; |
544 | } |
545 | }; |
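A sketch of getSplat and the caveat noted above (illustrative; Ctx as above):

    Constant *Seven = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
    Constant *Splat = ConstantVector::getSplat(ElementCount::getFixed(4), Seven);
    // Per the note on getSplat, the result may fold to a ConstantDataVector,
    // so don't cast<ConstantVector>(Splat) blindly; use dyn_cast instead.
    (void)Splat;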
546 | |
547 | //===----------------------------------------------------------------------===// |
548 | /// A constant pointer value that points to null |
549 | /// |
550 | class ConstantPointerNull final : public ConstantData { |
551 | friend class Constant; |
552 | |
553 | explicit ConstantPointerNull(PointerType *T) |
554 | : ConstantData(T, Value::ConstantPointerNullVal) {} |
555 | |
556 | void destroyConstantImpl(); |
557 | |
558 | public: |
559 | ConstantPointerNull(const ConstantPointerNull &) = delete; |
560 | |
561 | /// Static factory methods - Return objects of the specified value |
562 | static ConstantPointerNull *get(PointerType *T); |
563 | |
564 | /// Specialize the getType() method to always return a PointerType,
565 | /// which reduces the amount of casting needed in parts of the compiler. |
566 | inline PointerType *getType() const { |
567 | return cast<PointerType>(Value::getType()); |
568 | } |
569 | |
570 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
571 | static bool classof(const Value *V) { |
572 | return V->getValueID() == ConstantPointerNullVal; |
573 | } |
574 | }; |
575 | |
576 | //===----------------------------------------------------------------------===// |
577 | /// ConstantDataSequential - A vector or array constant whose element type is a |
578 | /// simple 1/2/4/8-byte integer or float/double, and whose elements are just |
579 | /// simple data values (i.e. ConstantInt/ConstantFP). This Constant node has no |
580 | /// operands because it stores all of the elements of the constant as densely |
581 | /// packed data, instead of as Value*'s. |
582 | /// |
583 | /// This is the common base class of ConstantDataArray and ConstantDataVector. |
584 | /// |
585 | class ConstantDataSequential : public ConstantData { |
586 | friend class LLVMContextImpl; |
587 | friend class Constant; |
588 | |
589 | /// A pointer to the bytes underlying this constant (which is owned by the |
590 | /// uniquing StringMap). |
591 | const char *DataElements; |
592 | |
593 | /// This forms a linked list of ConstantDataSequential nodes that have
594 | /// the same value but different type. For example, 0,0,0,1 could be a 4 |
595 | /// element array of i8, or a 1-element array of i32. They'll both end up in |
596 | /// the same StringMap bucket, linked up. |
597 | std::unique_ptr<ConstantDataSequential> Next; |
598 | |
599 | void destroyConstantImpl(); |
600 | |
601 | protected: |
602 | explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data) |
603 | : ConstantData(ty, VT), DataElements(Data) {} |
604 | |
605 | static Constant *getImpl(StringRef Bytes, Type *Ty); |
606 | |
607 | public: |
608 | ConstantDataSequential(const ConstantDataSequential &) = delete; |
609 | |
610 | /// Return true if a ConstantDataSequential can be formed with a vector or |
611 | /// array of the specified element type. |
612 | /// ConstantDataArray only works with normal float and int types that are |
613 | /// stored densely in memory, not with things like i42 or x86_f80. |
614 | static bool isElementTypeCompatible(Type *Ty); |
615 | |
616 | /// If this is a sequential container of integers (of any size), return the |
617 | /// specified element in the low bits of a uint64_t. |
618 | uint64_t getElementAsInteger(unsigned i) const; |
619 | |
620 | /// If this is a sequential container of integers (of any size), return the |
621 | /// specified element as an APInt. |
622 | APInt getElementAsAPInt(unsigned i) const; |
623 | |
624 | /// If this is a sequential container of floating point type, return the |
625 | /// specified element as an APFloat. |
626 | APFloat getElementAsAPFloat(unsigned i) const; |
627 | |
628 | /// If this is a sequential container of floats, return the specified element
629 | /// as a float. |
630 | float getElementAsFloat(unsigned i) const; |
631 | |
632 | /// If this is a sequential container of doubles, return the specified
633 | /// element as a double. |
634 | double getElementAsDouble(unsigned i) const; |
635 | |
636 | /// Return a Constant for a specified index's element. |
637 | /// Note that this has to compute a new constant to return, so it isn't as |
638 | /// efficient as getElementAsInteger/Float/Double. |
639 | Constant *getElementAsConstant(unsigned i) const; |
640 | |
641 | /// Return the element type of the array/vector. |
642 | Type *getElementType() const; |
643 | |
644 | /// Return the number of elements in the array or vector. |
645 | unsigned getNumElements() const; |
646 | |
647 | /// Return the size (in bytes) of each element in the array/vector. |
648 | /// The size of the elements is known to be a multiple of one byte. |
649 | uint64_t getElementByteSize() const; |
650 | |
651 | /// This method returns true if this is an array of \p CharSize integers. |
652 | bool isString(unsigned CharSize = 8) const; |
653 | |
654 | /// This method returns true if the array "isString", ends with a null byte, |
655 | /// and does not contain any other null bytes.
656 | bool isCString() const; |
657 | |
658 | /// If this array is isString(), then this method returns the array as a |
659 | /// StringRef. Otherwise, it asserts out. |
660 | StringRef getAsString() const { |
661 | assert(isString() && "Not a string");
662 | return getRawDataValues(); |
663 | } |
664 | |
665 | /// If this array is isCString(), then this method returns the array (without |
666 | /// the trailing null byte) as a StringRef. Otherwise, it asserts out. |
667 | StringRef getAsCString() const { |
668 | assert(isCString() && "Isn't a C string");
669 | StringRef Str = getAsString(); |
670 | return Str.substr(0, Str.size()-1); |
671 | } |
672 | |
673 | /// Return the raw, underlying, bytes of this data. Note that this is an |
674 | /// extremely tricky thing to work with, as it exposes the host endianness of |
675 | /// the data elements. |
676 | StringRef getRawDataValues() const; |
677 | |
678 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
679 | static bool classof(const Value *V) { |
680 | return V->getValueID() == ConstantDataArrayVal || |
681 | V->getValueID() == ConstantDataVectorVal; |
682 | } |
683 | |
684 | private: |
685 | const char *getElementPointer(unsigned Elt) const; |
686 | }; |
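// --- Illustrative sketch (not part of the original header): reading the
// densely packed elements back out of an existing ConstantDataSequential.
// `CDS` is assumed to hold integer elements; getElementAsInteger() avoids
// materializing a per-element Constant, unlike getElementAsConstant().
static uint64_t sumElements(const ConstantDataSequential *CDS) {
  uint64_t Sum = 0;
  for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I)
    Sum += CDS->getElementAsInteger(I); // low bits of each element
  return Sum;
}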
687 | |
688 | //===----------------------------------------------------------------------===// |
689 | /// An array constant whose element type is a simple 1/2/4/8-byte integer or |
690 | /// float/double, and whose elements are just simple data values |
691 | /// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it |
692 | /// stores all of the elements of the constant as densely packed data, instead |
693 | /// of as Value*'s. |
694 | class ConstantDataArray final : public ConstantDataSequential { |
695 | friend class ConstantDataSequential; |
696 | |
697 | explicit ConstantDataArray(Type *ty, const char *Data) |
698 | : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {} |
699 | |
700 | public: |
701 | ConstantDataArray(const ConstantDataArray &) = delete; |
702 | |
703 | /// get() constructor - Return a constant with array type with an element |
704 | /// count and element type matching the ArrayRef passed in. Note that this |
705 | /// can return a ConstantAggregateZero object. |
706 | template <typename ElementTy> |
707 | static Constant *get(LLVMContext &Context, ArrayRef<ElementTy> Elts) { |
708 | const char *Data = reinterpret_cast<const char *>(Elts.data()); |
709 | return getRaw(StringRef(Data, Elts.size() * sizeof(ElementTy)), Elts.size(), |
710 | Type::getScalarTy<ElementTy>(Context)); |
711 | } |
712 | |
713 | /// get() constructor - ArrayTy needs to be compatible with |
714 | /// ArrayRef<ElementTy>. Calls get(LLVMContext, ArrayRef<ElementTy>). |
715 | template <typename ArrayTy> |
716 | static Constant *get(LLVMContext &Context, ArrayTy &Elts) { |
717 | return ConstantDataArray::get(Context, makeArrayRef(Elts)); |
718 | } |
719 | |
720 | /// get() constructor - Return a constant with array type with an element |
721 | /// count and element type matching the NumElements and ElementTy parameters |
722 | /// passed in. Note that this can return a ConstantAggregateZero object. |
723 | /// ElementTy needs to be one of i8/i16/i32/i64/float/double. Data is the |
724 | /// buffer containing the elements. Be careful to make sure Data uses the |
725 | /// right endianness, the buffer will be used as-is. |
726 | static Constant *getRaw(StringRef Data, uint64_t NumElements, Type *ElementTy) { |
727 | Type *Ty = ArrayType::get(ElementTy, NumElements); |
728 | return getImpl(Data, Ty); |
729 | } |
730 | |
731 | /// getFP() constructors - Return a constant of array type with a float |
732 | /// element type taken from argument `ElementType', and count taken from |
733 | /// argument `Elts'. The bit width of the element type must match the bit
734 | /// width of the integer type in the passed-in ArrayRef (i.e. half or bfloat
735 | /// for 16 bits, float for 32 bits, double for 64 bits). Note
736 | /// that this can return a ConstantAggregateZero object. |
737 | static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts); |
738 | static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts); |
739 | static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts); |
740 | |
741 | /// This method constructs a CDS and initializes it with a text string. |
742 | /// The default behavior (AddNull==true) causes a null terminator to |
743 | /// be placed at the end of the array (increasing the length of the string by |
744 | /// one more than the StringRef would normally indicate). Pass AddNull=false
745 | /// to disable this behavior. |
746 | static Constant *getString(LLVMContext &Context, StringRef Initializer, |
747 | bool AddNull = true); |
748 | |
749 | /// Specialize the getType() method to always return an ArrayType, |
750 | /// which reduces the amount of casting needed in parts of the compiler. |
751 | inline ArrayType *getType() const { |
752 | return cast<ArrayType>(Value::getType()); |
753 | } |
754 | |
755 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
756 | static bool classof(const Value *V) { |
757 | return V->getValueID() == ConstantDataArrayVal; |
758 | } |
759 | }; |
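// --- Illustrative sketch (not part of the original header; assumes an
// existing LLVMContext `Ctx`): building arrays with get() and getString().
// Note the Constant* return type: an all-zero input folds to
// ConstantAggregateZero rather than a ConstantDataArray.
static Constant *buildExampleArrays(LLVMContext &Ctx) {
  uint32_t Vals[] = {1, 2, 3, 4};
  Constant *Arr = ConstantDataArray::get(Ctx, makeArrayRef(Vals)); // [4 x i32]
  Constant *Str = ConstantDataArray::getString(Ctx, "hi"); // [3 x i8] c"hi\00"
  assert(cast<ConstantDataSequential>(Str)->getAsCString() == "hi");
  return Arr;
}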
760 | |
761 | //===----------------------------------------------------------------------===// |
762 | /// A vector constant whose element type is a simple 1/2/4/8-byte integer or |
763 | /// float/double, and whose elements are just simple data values |
764 | /// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it |
765 | /// stores all of the elements of the constant as densely packed data, instead |
766 | /// of as Value*'s. |
767 | class ConstantDataVector final : public ConstantDataSequential { |
768 | friend class ConstantDataSequential; |
769 | |
770 | explicit ConstantDataVector(Type *ty, const char *Data) |
771 | : ConstantDataSequential(ty, ConstantDataVectorVal, Data), |
772 | IsSplatSet(false) {} |
773 | // Cache whether or not the constant is a splat. |
774 | mutable bool IsSplatSet : 1; |
775 | mutable bool IsSplat : 1; |
776 | bool isSplatData() const; |
777 | |
778 | public: |
779 | ConstantDataVector(const ConstantDataVector &) = delete; |
780 | |
781 | /// get() constructors - Return a constant with vector type with an element |
782 | /// count and element type matching the ArrayRef passed in. Note that this |
783 | /// can return a ConstantAggregateZero object. |
784 | static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts); |
785 | static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts); |
786 | static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts); |
787 | static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts); |
788 | static Constant *get(LLVMContext &Context, ArrayRef<float> Elts); |
789 | static Constant *get(LLVMContext &Context, ArrayRef<double> Elts); |
790 | |
791 | /// getFP() constructors - Return a constant of vector type with a float |
792 | /// element type taken from argument `ElementType', and count taken from |
793 | /// argument `Elts'. The bit width of the element type must match the bit
794 | /// width of the integer type in the passed-in ArrayRef (i.e. half or bfloat
795 | /// for 16 bits, float for 32 bits, double for 64 bits). Note
796 | /// that this can return a ConstantAggregateZero object. |
797 | static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts); |
798 | static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts); |
799 | static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts); |
800 | |
801 | /// Return a ConstantVector with the specified constant in each element. |
802 | /// The specified constant has to be of a compatible type (i8/i16/
803 | /// i32/i64/float/double) and must be a ConstantFP or ConstantInt. |
804 | static Constant *getSplat(unsigned NumElts, Constant *Elt); |
805 | |
806 | /// Returns true if this is a splat constant, meaning that all elements have |
807 | /// the same value. |
808 | bool isSplat() const; |
809 | |
810 | /// If this is a splat constant, meaning that all of the elements have the |
811 | /// same value, return that value. Otherwise return NULL. |
812 | Constant *getSplatValue() const; |
813 | |
814 | /// Specialize the getType() method to always return a FixedVectorType, |
815 | /// which reduces the amount of casting needed in parts of the compiler. |
816 | inline FixedVectorType *getType() const { |
817 | return cast<FixedVectorType>(Value::getType()); |
818 | } |
819 | |
820 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
821 | static bool classof(const Value *V) { |
822 | return V->getValueID() == ConstantDataVectorVal; |
823 | } |
824 | }; |
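// --- Illustrative sketch (not part of the original header; assumes an
// existing LLVMContext `Ctx`): a splat vector built from one ConstantInt.
// A zero splat folds to ConstantAggregateZero, hence the dyn_cast guard.
static Constant *buildSplat(LLVMContext &Ctx) {
  Constant *Seven = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
  Constant *V = ConstantDataVector::getSplat(4, Seven); // <4 x i32> of 7s
  if (auto *CDV = dyn_cast<ConstantDataVector>(V))
    assert(CDV->isSplat() && CDV->getSplatValue() == Seven);
  return V;
}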
825 | |
826 | //===----------------------------------------------------------------------===// |
827 | /// A constant token which is empty |
828 | /// |
829 | class ConstantTokenNone final : public ConstantData { |
830 | friend class Constant; |
831 | |
832 | explicit ConstantTokenNone(LLVMContext &Context) |
833 | : ConstantData(Type::getTokenTy(Context), ConstantTokenNoneVal) {} |
834 | |
835 | void destroyConstantImpl(); |
836 | |
837 | public: |
838 | ConstantTokenNone(const ConstantTokenNone &) = delete; |
839 | |
840 | /// Return the ConstantTokenNone. |
841 | static ConstantTokenNone *get(LLVMContext &Context); |
842 | |
843 | /// Methods to support type inquiry through isa, cast, and dyn_cast. |
844 | static bool classof(const Value *V) { |
845 | return V->getValueID() == ConstantTokenNoneVal; |
846 | } |
847 | }; |
848 | |
849 | /// The address of a basic block. |
850 | /// |
851 | class BlockAddress final : public Constant { |
852 | friend class Constant; |
853 | |
854 | BlockAddress(Function *F, BasicBlock *BB); |
855 | |
856 | void *operator new(size_t s) { return User::operator new(s, 2); } |
857 | |
858 | void destroyConstantImpl(); |
859 | Value *handleOperandChangeImpl(Value *From, Value *To); |
860 | |
861 | public: |
862 | /// Return a BlockAddress for the specified function and basic block. |
863 | static BlockAddress *get(Function *F, BasicBlock *BB); |
864 | |
865 | /// Return a BlockAddress for the specified basic block. The basic |
866 | /// block must be embedded into a function. |
867 | static BlockAddress *get(BasicBlock *BB); |
868 | |
869 | /// Lookup an existing \c BlockAddress constant for the given BasicBlock. |
870 | /// |
871 | /// \returns 0 if \c !BB->hasAddressTaken(), otherwise the \c BlockAddress. |
872 | static BlockAddress *lookup(const BasicBlock *BB); |
873 | |
874 | /// Transparently provide more efficient getOperand methods. |
875 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
876 | |
877 | Function *getFunction() const { return (Function*)Op<0>().get(); } |
878 | BasicBlock *getBasicBlock() const { return (BasicBlock*)Op<1>().get(); } |
879 | |
880 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
881 | static bool classof(const Value *V) { |
882 | return V->getValueID() == BlockAddressVal; |
883 | } |
884 | }; |
885 | |
886 | template <> |
887 | struct OperandTraits<BlockAddress> : |
888 | public FixedNumOperandTraits<BlockAddress, 2> { |
889 | }; |
890 | |
891 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)
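// --- Illustrative sketch (not part of the original header): taking and
// querying the address of a basic block. `F` must be the function containing
// `BB`; the constant is uniqued, so lookup() finds the same node later.
static BlockAddress *takeBlockAddress(Function *F, BasicBlock *BB) {
  BlockAddress *BA = BlockAddress::get(F, BB);
  assert(BA->getFunction() == F && BA->getBasicBlock() == BB);
  assert(BlockAddress::lookup(BB) == BA);
  return BA;
}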
892 | |
893 | /// Wrapper around a global value that functionally represents the original
894 | /// function. This can be a function, a global alias to a function, or an
895 | /// ifunc.
896 | class DSOLocalEquivalent final : public Constant { |
897 | friend class Constant; |
898 | |
899 | DSOLocalEquivalent(GlobalValue *GV); |
900 | |
901 | void *operator new(size_t s) { return User::operator new(s, 1); } |
902 | |
903 | void destroyConstantImpl(); |
904 | Value *handleOperandChangeImpl(Value *From, Value *To); |
905 | |
906 | public: |
907 | /// Return a DSOLocalEquivalent for the specified global value. |
908 | static DSOLocalEquivalent *get(GlobalValue *GV); |
909 | |
910 | /// Transparently provide more efficient getOperand methods. |
911 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
912 | |
913 | GlobalValue *getGlobalValue() const { |
914 | return cast<GlobalValue>(Op<0>().get()); |
915 | } |
916 | |
917 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
918 | static bool classof(const Value *V) { |
919 | return V->getValueID() == DSOLocalEquivalentVal; |
920 | } |
921 | }; |
922 | |
923 | template <> |
924 | struct OperandTraits<DSOLocalEquivalent> |
925 | : public FixedNumOperandTraits<DSOLocalEquivalent, 1> {}; |
926 | |
927 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(DSOLocalEquivalent, Value)
928 | |
929 | //===----------------------------------------------------------------------===// |
930 | /// A constant value that is initialized with an expression using |
931 | /// other constant values. |
932 | /// |
933 | /// This class uses the standard Instruction opcodes to define the various |
934 | /// constant expressions. The Opcode field for the ConstantExpr class is |
935 | /// maintained in the Value::SubclassData field. |
936 | class ConstantExpr : public Constant { |
937 | friend struct ConstantExprKeyType; |
938 | friend class Constant; |
939 | |
940 | void destroyConstantImpl(); |
941 | Value *handleOperandChangeImpl(Value *From, Value *To); |
942 | |
943 | protected: |
944 | ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps) |
945 | : Constant(ty, ConstantExprVal, Ops, NumOps) { |
946 | // Operation type (an Instruction opcode) is stored as the SubclassData. |
947 | setValueSubclassData(Opcode); |
948 | } |
949 | |
950 | ~ConstantExpr() = default; |
951 | |
952 | public: |
953 | // Static methods to construct a ConstantExpr of different kinds. Note that |
954 | // these methods may return an object that is not an instance of the
955 | // ConstantExpr class, because they will attempt to fold the constant |
956 | // expression into something simpler if possible. |
957 | |
958 | /// getAlignOf constant expr - computes the alignment of a type in a target |
959 | /// independent way (Note: the return type is an i64). |
960 | static Constant *getAlignOf(Type *Ty); |
961 | |
962 | /// getSizeOf constant expr - computes the (alloc) size of a type (in |
963 | /// address-units, not bits) in a target independent way (Note: the return |
964 | /// type is an i64). |
965 | /// |
966 | static Constant *getSizeOf(Type *Ty); |
967 | |
968 | /// getOffsetOf constant expr - computes the offset of a struct field in a |
969 | /// target independent way (Note: the return type is an i64). |
970 | /// |
971 | static Constant *getOffsetOf(StructType *STy, unsigned FieldNo); |
972 | |
973 | /// getOffsetOf constant expr - This is a generalized form of getOffsetOf, |
974 | /// which supports any aggregate type, and any Constant index. |
975 | /// |
976 | static Constant *getOffsetOf(Type *Ty, Constant *FieldNo); |
977 | |
978 | static Constant *getNeg(Constant *C, bool HasNUW = false, bool HasNSW =false); |
979 | static Constant *getFNeg(Constant *C); |
980 | static Constant *getNot(Constant *C); |
981 | static Constant *getAdd(Constant *C1, Constant *C2, |
982 | bool HasNUW = false, bool HasNSW = false); |
983 | static Constant *getFAdd(Constant *C1, Constant *C2); |
984 | static Constant *getSub(Constant *C1, Constant *C2, |
985 | bool HasNUW = false, bool HasNSW = false); |
986 | static Constant *getFSub(Constant *C1, Constant *C2); |
987 | static Constant *getMul(Constant *C1, Constant *C2, |
988 | bool HasNUW = false, bool HasNSW = false); |
989 | static Constant *getFMul(Constant *C1, Constant *C2); |
990 | static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false); |
991 | static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false); |
992 | static Constant *getFDiv(Constant *C1, Constant *C2); |
993 | static Constant *getURem(Constant *C1, Constant *C2); |
994 | static Constant *getSRem(Constant *C1, Constant *C2); |
995 | static Constant *getFRem(Constant *C1, Constant *C2); |
996 | static Constant *getAnd(Constant *C1, Constant *C2); |
997 | static Constant *getOr(Constant *C1, Constant *C2); |
998 | static Constant *getXor(Constant *C1, Constant *C2); |
999 | static Constant *getUMin(Constant *C1, Constant *C2); |
1000 | static Constant *getShl(Constant *C1, Constant *C2, |
1001 | bool HasNUW = false, bool HasNSW = false); |
1002 | static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false); |
1003 | static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false); |
1004 | static Constant *getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1005 | static Constant *getSExt(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1006 | static Constant *getZExt(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1007 | static Constant *getFPTrunc(Constant *C, Type *Ty, |
1008 | bool OnlyIfReduced = false); |
1009 | static Constant *getFPExtend(Constant *C, Type *Ty, |
1010 | bool OnlyIfReduced = false); |
1011 | static Constant *getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1012 | static Constant *getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1013 | static Constant *getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1014 | static Constant *getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced = false); |
1015 | static Constant *getPtrToInt(Constant *C, Type *Ty, |
1016 | bool OnlyIfReduced = false); |
1017 | static Constant *getIntToPtr(Constant *C, Type *Ty, |
1018 | bool OnlyIfReduced = false); |
1019 | static Constant *getBitCast(Constant *C, Type *Ty, |
1020 | bool OnlyIfReduced = false); |
1021 | static Constant *getAddrSpaceCast(Constant *C, Type *Ty, |
1022 | bool OnlyIfReduced = false); |
1023 | |
1024 | static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); } |
1025 | static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); } |
1026 | |
1027 | static Constant *getNSWAdd(Constant *C1, Constant *C2) { |
1028 | return getAdd(C1, C2, false, true); |
1029 | } |
1030 | |
1031 | static Constant *getNUWAdd(Constant *C1, Constant *C2) { |
1032 | return getAdd(C1, C2, true, false); |
1033 | } |
1034 | |
1035 | static Constant *getNSWSub(Constant *C1, Constant *C2) { |
1036 | return getSub(C1, C2, false, true); |
1037 | } |
1038 | |
1039 | static Constant *getNUWSub(Constant *C1, Constant *C2) { |
1040 | return getSub(C1, C2, true, false); |
1041 | } |
1042 | |
1043 | static Constant *getNSWMul(Constant *C1, Constant *C2) { |
1044 | return getMul(C1, C2, false, true); |
1045 | } |
1046 | |
1047 | static Constant *getNUWMul(Constant *C1, Constant *C2) { |
1048 | return getMul(C1, C2, true, false); |
1049 | } |
1050 | |
1051 | static Constant *getNSWShl(Constant *C1, Constant *C2) { |
1052 | return getShl(C1, C2, false, true); |
1053 | } |
1054 | |
1055 | static Constant *getNUWShl(Constant *C1, Constant *C2) { |
1056 | return getShl(C1, C2, true, false); |
1057 | } |
1058 | |
1059 | static Constant *getExactSDiv(Constant *C1, Constant *C2) { |
1060 | return getSDiv(C1, C2, true); |
1061 | } |
1062 | |
1063 | static Constant *getExactUDiv(Constant *C1, Constant *C2) { |
1064 | return getUDiv(C1, C2, true); |
1065 | } |
1066 | |
1067 | static Constant *getExactAShr(Constant *C1, Constant *C2) { |
1068 | return getAShr(C1, C2, true); |
1069 | } |
1070 | |
1071 | static Constant *getExactLShr(Constant *C1, Constant *C2) { |
1072 | return getLShr(C1, C2, true); |
1073 | } |
1074 | |
1075 | /// If C is a scalar/fixed width vector of known powers of 2, then this |
1076 | /// function returns a new scalar/fixed width vector obtained from logBase2 |
1077 | /// of C. Undef vector elements are set to zero. |
1078 | /// Return a null pointer otherwise. |
1079 | static Constant *getExactLogBase2(Constant *C); |
1080 | |
1081 | /// Return the identity constant for a binary opcode. |
1082 | /// The identity constant C is defined as X op C = X and C op X = X for every |
1083 | /// X when the binary operation is commutative. If the binop is not |
1084 | /// commutative, callers can acquire the operand 1 identity constant by |
1085 | /// setting AllowRHSConstant to true. For example, any shift has a zero |
1086 | /// identity constant for operand 1: X shift 0 = X. |
1087 | /// Return nullptr if the operator does not have an identity constant. |
1088 | static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty, |
1089 | bool AllowRHSConstant = false); |
1090 | |
1091 | /// Return the absorbing element for the given binary |
1092 | /// operation, i.e. a constant C such that X op C = C and C op X = C for |
1093 | /// every X. For example, this returns zero for integer multiplication. |
1094 | /// It returns null if the operator doesn't have an absorbing element. |
1095 | static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty); |
1096 | |
1097 | /// Transparently provide more efficient getOperand methods. |
1098 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
1099 | |
1100 | /// Convenience function for getting a Cast operation. |
1101 | /// |
1102 | /// \param ops The opcode for the conversion |
1103 | /// \param C The constant to be converted |
1104 | /// \param Ty The type to which the constant is converted |
1105 | /// \param OnlyIfReduced see \a getWithOperands() docs. |
1106 | static Constant *getCast(unsigned ops, Constant *C, Type *Ty, |
1107 | bool OnlyIfReduced = false); |
1108 | |
1109 | // Create a ZExt or BitCast cast constant expression |
1110 | static Constant *getZExtOrBitCast( |
1111 | Constant *C, ///< The constant to zext or bitcast |
1112 | Type *Ty ///< The type to zext or bitcast C to |
1113 | ); |
1114 | |
1115 | // Create a SExt or BitCast cast constant expression |
1116 | static Constant *getSExtOrBitCast( |
1117 | Constant *C, ///< The constant to sext or bitcast |
1118 | Type *Ty ///< The type to sext or bitcast C to |
1119 | ); |
1120 | |
1121 | // Create a Trunc or BitCast cast constant expression |
1122 | static Constant *getTruncOrBitCast( |
1123 | Constant *C, ///< The constant to trunc or bitcast |
1124 | Type *Ty ///< The type to trunc or bitcast C to |
1125 | ); |
1126 | |
1127 | /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant |
1128 | /// expression. |
1129 | static Constant *getPointerCast( |
1130 | Constant *C, ///< The pointer value to be casted (operand 0) |
1131 | Type *Ty ///< The type to which cast should be made |
1132 | ); |
1133 | |
1134 | /// Create a BitCast or AddrSpaceCast for a pointer type depending on |
1135 | /// the address space. |
1136 | static Constant *getPointerBitCastOrAddrSpaceCast( |
1137 | Constant *C, ///< The constant to addrspacecast or bitcast |
1138 | Type *Ty ///< The type to bitcast or addrspacecast C to |
1139 | ); |
1140 | |
1141 | /// Create a ZExt, Bitcast or Trunc for integer -> integer casts |
1142 | static Constant *getIntegerCast( |
1143 | Constant *C, ///< The integer constant to be casted |
1144 | Type *Ty, ///< The integer type to cast to |
1145 | bool isSigned ///< Whether C should be treated as signed or not |
1146 | ); |
1147 | |
1148 | /// Create a FPExt, Bitcast or FPTrunc for fp -> fp casts |
1149 | static Constant *getFPCast( |
1150 | Constant *C, ///< The integer constant to be casted |
1151 | Type *Ty ///< The integer type to cast to |
1152 | ); |
1153 | |
1154 | /// Return true if this is a convert constant expression |
1155 | bool isCast() const; |
1156 | |
1157 | /// Return true if this is a compare constant expression |
1158 | bool isCompare() const; |
1159 | |
1160 | /// Return true if this is an insertvalue or extractvalue expression, |
1161 | /// and the getIndices() method may be used. |
1162 | bool hasIndices() const; |
1163 | |
1164 | /// Return true if this is a getelementptr expression and all |
1165 | /// the index operands are compile-time known integers within the |
1166 | /// corresponding notional static array extents. Note that this is |
1167 | /// not equivalent to, a subset of, or a superset of the "inbounds"
1168 | /// property. |
1169 | bool isGEPWithNoNotionalOverIndexing() const; |
1170 | |
1171 | /// Select constant expr |
1172 | /// |
1173 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
1174 | static Constant *getSelect(Constant *C, Constant *V1, Constant *V2, |
1175 | Type *OnlyIfReducedTy = nullptr); |
1176 | |
1177 | /// get - Return a unary operator constant expression, |
1178 | /// folding if possible. |
1179 | /// |
1180 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
1181 | static Constant *get(unsigned Opcode, Constant *C1, unsigned Flags = 0, |
1182 | Type *OnlyIfReducedTy = nullptr); |
1183 | |
1184 | /// get - Return a binary or shift operator constant expression, |
1185 | /// folding if possible. |
1186 | /// |
1187 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
1188 | static Constant *get(unsigned Opcode, Constant *C1, Constant *C2, |
1189 | unsigned Flags = 0, Type *OnlyIfReducedTy = nullptr); |
1190 | |
1191 | /// Return an ICmp or FCmp comparison operator constant expression. |
1192 | /// |
1193 | /// \param OnlyIfReduced see \a getWithOperands() docs. |
1194 | static Constant *getCompare(unsigned short pred, Constant *C1, Constant *C2, |
1195 | bool OnlyIfReduced = false); |
1196 | |
1197 | /// get* - Return some common constants without having to |
1198 | /// specify the full Instruction::OPCODE identifier. |
1199 | /// |
1200 | static Constant *getICmp(unsigned short pred, Constant *LHS, Constant *RHS, |
1201 | bool OnlyIfReduced = false); |
1202 | static Constant *getFCmp(unsigned short pred, Constant *LHS, Constant *RHS, |
1203 | bool OnlyIfReduced = false); |
1204 | |
1205 | /// Getelementptr form. Value* is only accepted for convenience; |
1206 | /// all elements must be Constants. |
1207 | /// |
1208 | /// \param InRangeIndex the inrange index if present or None. |
1209 | /// \param OnlyIfReducedTy see \a getWithOperands() docs. |
1210 | static Constant *getGetElementPtr(Type *Ty, Constant *C, |
1211 | ArrayRef<Constant *> IdxList, |
1212 | bool InBounds = false, |
1213 | Optional<unsigned> InRangeIndex = None, |
1214 | Type *OnlyIfReducedTy = nullptr) { |
1215 | return getGetElementPtr( |
1216 | Ty, C, makeArrayRef((Value * const *)IdxList.data(), IdxList.size()), |
1217 | InBounds, InRangeIndex, OnlyIfReducedTy); |
1218 | } |
1219 | static Constant *getGetElementPtr(Type *Ty, Constant *C, Constant *Idx, |
1220 | bool InBounds = false, |
1221 | Optional<unsigned> InRangeIndex = None, |
1222 | Type *OnlyIfReducedTy = nullptr) { |
1223 | // This form of the function only exists to avoid ambiguous overload |
1224 | // warnings about whether to convert Idx to ArrayRef<Constant *> or |
1225 | // ArrayRef<Value *>. |
1226 | return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRangeIndex, |
1227 | OnlyIfReducedTy); |
1228 | } |
1229 | static Constant *getGetElementPtr(Type *Ty, Constant *C, |
1230 | ArrayRef<Value *> IdxList, |
1231 | bool InBounds = false, |
1232 | Optional<unsigned> InRangeIndex = None, |
1233 | Type *OnlyIfReducedTy = nullptr); |
1234 | |
1235 | /// Create an "inbounds" getelementptr. See the documentation for the |
1236 | /// "inbounds" flag in LangRef.html for details. |
1237 | static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C, |
1238 | ArrayRef<Constant *> IdxList) { |
1239 | return getGetElementPtr(Ty, C, IdxList, true); |
1240 | } |
1241 | static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C, |
1242 | Constant *Idx) { |
1243 | // This form of the function only exists to avoid ambiguous overload |
1244 | // warnings about whether to convert Idx to ArrayRef<Constant *> or |
1245 | // ArrayRef<Value *>. |
1246 | return getGetElementPtr(Ty, C, Idx, true); |
1247 | } |
1248 | static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C, |
1249 | ArrayRef<Value *> IdxList) { |
1250 | return getGetElementPtr(Ty, C, IdxList, true); |
1251 | } |
1252 | |
1253 | static Constant *getExtractElement(Constant *Vec, Constant *Idx, |
1254 | Type *OnlyIfReducedTy = nullptr); |
1255 | static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, |
1256 | Type *OnlyIfReducedTy = nullptr); |
1257 | static Constant *getShuffleVector(Constant *V1, Constant *V2, |
1258 | ArrayRef<int> Mask, |
1259 | Type *OnlyIfReducedTy = nullptr); |
1260 | static Constant *getExtractValue(Constant *Agg, ArrayRef<unsigned> Idxs, |
1261 | Type *OnlyIfReducedTy = nullptr); |
1262 | static Constant *getInsertValue(Constant *Agg, Constant *Val, |
1263 | ArrayRef<unsigned> Idxs, |
1264 | Type *OnlyIfReducedTy = nullptr); |
1265 | |
1266 | /// Return the opcode at the root of this constant expression |
1267 | unsigned getOpcode() const { return getSubclassDataFromValue(); } |
1268 | |
1269 | /// Return the ICMP or FCMP predicate value. Assert if this is not an ICMP or |
1270 | /// FCMP constant expression. |
1271 | unsigned getPredicate() const; |
1272 | |
1273 | /// Assert that this is an insertvalue or extractvalue
1274 | /// expression and return the list of indices. |
1275 | ArrayRef<unsigned> getIndices() const; |
1276 | |
1277 | /// Assert that this is a shufflevector and return the mask. See class |
1278 | /// ShuffleVectorInst for a description of the mask representation. |
1279 | ArrayRef<int> getShuffleMask() const; |
1280 | |
1281 | /// Assert that this is a shufflevector and return the mask. |
1282 | /// |
1283 | /// TODO: This is a temporary hack until we update the bitcode format for |
1284 | /// shufflevector. |
1285 | Constant *getShuffleMaskForBitcode() const; |
1286 | |
1287 | /// Return a string representation for an opcode. |
1288 | const char *getOpcodeName() const; |
1289 | |
1290 | /// Return a constant expression identical to this one, but with the specified |
1291 | /// operand set to the specified value. |
1292 | Constant *getWithOperandReplaced(unsigned OpNo, Constant *Op) const; |
1293 | |
1294 | /// This returns the current constant expression with the operands replaced |
1295 | /// with the specified values. The specified array must have the same number |
1296 | /// of operands as our current one. |
1297 | Constant *getWithOperands(ArrayRef<Constant*> Ops) const { |
1298 | return getWithOperands(Ops, getType()); |
1299 | } |
1300 | |
1301 | /// Get the current expression with the operands replaced. |
1302 | /// |
1303 | /// Return the current constant expression with the operands replaced with \c |
1304 | /// Ops and the type with \c Ty. The new operands must have the same number |
1305 | /// as the current ones. |
1306 | /// |
1307 | /// If \c OnlyIfReduced is \c true, nullptr will be returned unless something |
1308 | /// gets constant-folded, the type changes, or the expression is otherwise |
1309 | /// canonicalized. This parameter should almost always be \c false. |
1310 | Constant *getWithOperands(ArrayRef<Constant *> Ops, Type *Ty, |
1311 | bool OnlyIfReduced = false, |
1312 | Type *SrcTy = nullptr) const; |
1313 | |
1314 | /// Returns an Instruction which implements the same operation as this |
1315 | /// ConstantExpr. The instruction is not linked to any basic block. |
1316 | /// |
1317 | /// A better approach to this could be to have a constructor for Instruction |
1318 | /// which would take a ConstantExpr parameter, but that would have spread |
1319 | /// implementation details of ConstantExpr outside of Constants.cpp, which |
1320 | /// would make it harder to remove ConstantExprs altogether. |
1321 | Instruction *getAsInstruction() const; |
1322 | |
1323 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1324 | static bool classof(const Value *V) { |
1325 | return V->getValueID() == ConstantExprVal; |
1326 | } |
1327 | |
1328 | private: |
1329 | // Shadow Value::setValueSubclassData with a private forwarding method so that |
1330 | // subclasses cannot accidentally use it. |
1331 | void setValueSubclassData(unsigned short D) { |
1332 | Value::setValueSubclassData(D); |
1333 | } |
1334 | }; |
1335 | |
1336 | template <> |
1337 | struct OperandTraits<ConstantExpr> : |
1338 | public VariadicOperandTraits<ConstantExpr, 1> { |
1339 | }; |
1340 | |
1341 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
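// --- Illustrative sketch (not part of the original header; assumes an
// existing LLVMContext `Ctx`): the factory methods above fold eagerly, so
// the result is frequently a plain ConstantInt, not a ConstantExpr node.
static void foldedConstExpr(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *Six = ConstantInt::get(I32, 6);
  Constant *Seven = ConstantInt::get(I32, 7);
  Constant *Prod = ConstantExpr::getNSWMul(Six, Seven);
  assert(isa<ConstantInt>(Prod) && "folded to i32 42, not a ConstantExpr");
}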
1342 | |
1343 | //===----------------------------------------------------------------------===// |
1344 | /// 'undef' values are things that do not have specified contents. |
1345 | /// These are used for a variety of purposes, including global variable |
1346 | /// initializers and operands to instructions. 'undef' values can occur with |
1347 | /// any first-class type. |
1348 | /// |
1349 | /// Undef values aren't exactly constants; if they have multiple uses, they |
1350 | /// can appear to have different bit patterns at each use. See |
1351 | /// LangRef.html#undefvalues for details. |
1352 | /// |
1353 | class UndefValue : public ConstantData { |
1354 | friend class Constant; |
1355 | |
1356 | explicit UndefValue(Type *T) : ConstantData(T, UndefValueVal) {} |
1357 | |
1358 | void destroyConstantImpl(); |
1359 | |
1360 | protected: |
1361 | explicit UndefValue(Type *T, ValueTy vty) : ConstantData(T, vty) {} |
1362 | |
1363 | public: |
1364 | UndefValue(const UndefValue &) = delete; |
1365 | |
1366 | /// Static factory methods - Return an 'undef' object of the specified type. |
1367 | static UndefValue *get(Type *T); |
1368 | |
1369 | /// If this Undef has array or vector type, return an undef with the right
1370 | /// element type. |
1371 | UndefValue *getSequentialElement() const; |
1372 | |
1373 | /// If this undef has struct type, return an undef with the right element type
1374 | /// for the specified element. |
1375 | UndefValue *getStructElement(unsigned Elt) const; |
1376 | |
1377 | /// Return an undef of the right value for the specified GEP index if we can, |
1378 | /// otherwise return null (e.g. if C is a ConstantExpr). |
1379 | UndefValue *getElementValue(Constant *C) const; |
1380 | |
1381 | /// Return an undef of the right value for the specified GEP index. |
1382 | UndefValue *getElementValue(unsigned Idx) const; |
1383 | |
1384 | /// Return the number of elements in the array, vector, or struct. |
1385 | unsigned getNumElements() const; |
1386 | |
1387 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1388 | static bool classof(const Value *V) { |
1389 | return V->getValueID() == UndefValueVal || |
1390 | V->getValueID() == PoisonValueVal; |
1391 | } |
1392 | }; |
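// --- Illustrative sketch (not part of the original header; assumes an
// existing LLVMContext `Ctx`): an undef of an aggregate type hands out
// matching, uniqued undefs for its elements.
static void undefElements(LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  UndefValue *U = UndefValue::get(VecTy);      // <4 x i32> undef
  UndefValue *Elt = U->getSequentialElement(); // i32 undef
  assert(Elt == UndefValue::get(Type::getInt32Ty(Ctx)) && "uniqued");
}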
1393 | |
1394 | //===----------------------------------------------------------------------===// |
1395 | /// In order to facilitate speculative execution, many instructions do not |
1396 | /// invoke immediate undefined behavior when provided with illegal operands, |
1397 | /// and return a poison value instead. |
1398 | /// |
1399 | /// see LangRef.html#poisonvalues for details. |
1400 | /// |
1401 | class PoisonValue final : public UndefValue { |
1402 | friend class Constant; |
1403 | |
1404 | explicit PoisonValue(Type *T) : UndefValue(T, PoisonValueVal) {} |
1405 | |
1406 | void destroyConstantImpl(); |
1407 | |
1408 | public: |
1409 | PoisonValue(const PoisonValue &) = delete; |
1410 | |
1411 | /// Static factory methods - Return a 'poison' object of the specified type.
1412 | static PoisonValue *get(Type *T); |
1413 | |
1414 | /// If this poison has array or vector type, return a poison with the right |
1415 | /// element type. |
1416 | PoisonValue *getSequentialElement() const; |
1417 | |
1418 | /// If this poison has struct type, return a poison with the right element |
1419 | /// type for the specified element. |
1420 | PoisonValue *getStructElement(unsigned Elt) const; |
1421 | |
1422 | /// Return a poison of the right value for the specified GEP index if we can,
1423 | /// otherwise return null (e.g. if C is a ConstantExpr). |
1424 | PoisonValue *getElementValue(Constant *C) const; |
1425 | |
1426 | /// Return a poison of the right value for the specified GEP index.
1427 | PoisonValue *getElementValue(unsigned Idx) const; |
1428 | |
1429 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1430 | static bool classof(const Value *V) { |
1431 | return V->getValueID() == PoisonValueVal; |
1432 | } |
1433 | }; |
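// --- Illustrative sketch (not part of the original header; assumes an
// existing LLVMContext `Ctx`): note the classof definitions above -- poison
// is modeled as a special kind of undef, so isa<UndefValue> holds for a
// poison value but not the other way around.
static void poisonVsUndef(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  PoisonValue *P = PoisonValue::get(I32);
  assert(isa<UndefValue>(P));
  assert(!isa<PoisonValue>(UndefValue::get(I32)));
}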
1434 | |
1435 | } // end namespace llvm |
1436 | |
1437 | #endif // LLVM_IR_CONSTANTS_H |
1 | //===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// |
9 | /// \file |
10 | /// This file implements a class to represent arbitrary precision |
11 | /// integral constant values and operations on them. |
12 | /// |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_ADT_APINT_H |
16 | #define LLVM_ADT_APINT_H |
17 | |
18 | #include "llvm/Support/Compiler.h" |
19 | #include "llvm/Support/MathExtras.h" |
20 | #include <cassert> |
21 | #include <climits> |
22 | #include <cstring> |
23 | #include <string> |
24 | |
25 | namespace llvm { |
26 | class FoldingSetNodeID; |
27 | class StringRef; |
28 | class hash_code; |
29 | class raw_ostream; |
30 | |
31 | template <typename T> class SmallVectorImpl; |
32 | template <typename T> class ArrayRef; |
33 | template <typename T> class Optional; |
34 | template <typename T> struct DenseMapInfo; |
35 | |
36 | class APInt; |
37 | |
38 | inline APInt operator-(APInt); |
39 | |
40 | //===----------------------------------------------------------------------===// |
41 | // APInt Class |
42 | //===----------------------------------------------------------------------===// |
43 | |
44 | /// Class for arbitrary precision integers. |
45 | /// |
46 | /// APInt is a functional replacement for common-case unsigned integer types
47 | /// like "unsigned", "unsigned long" or "uint64_t", but it also allows
48 | /// non-byte-width integer sizes and large integer value types such as 3,
49 | /// 15, or more than 64 bits of precision. APInt provides a variety of arithmetic operators
50 | /// and methods to manipulate integer values of any bit-width. It supports both |
51 | /// the typical integer arithmetic and comparison operations as well as bitwise |
52 | /// manipulation. |
53 | /// |
54 | /// The class has several invariants worth noting: |
55 | /// * All bit, byte, and word positions are zero-based. |
56 | /// * Once the bit width is set, it doesn't change except by the Truncate, |
57 | /// SignExtend, or ZeroExtend operations. |
58 | /// * All binary operators must be on APInt instances of the same bit width. |
59 | /// Attempting to use these operators on instances with different bit |
60 | /// widths will yield an assertion. |
61 | /// * The value is stored canonically as an unsigned value. For operations |
62 | /// where it makes a difference, there are both signed and unsigned variants |
63 | /// of the operation. For example, sdiv and udiv. However, because the bit |
64 | /// widths must be the same, operations such as Mul and Add produce the same |
65 | /// results regardless of whether the values are interpreted as signed or |
66 | /// not. |
67 | /// * In general, the class tries to follow the style of computation that LLVM |
68 | /// uses in its IR. This simplifies its use for LLVM. |
69 | /// |
70 | class LLVM_NODISCARD APInt {
71 | public: |
72 | typedef uint64_t WordType; |
73 | |
74 | /// This enum is used to hold the constants we needed for APInt. |
75 | enum : unsigned { |
76 | /// Byte size of a word. |
77 | APINT_WORD_SIZE = sizeof(WordType), |
78 | /// Bits in a word. |
79 | APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
80 | }; |
81 | |
82 | enum class Rounding { |
83 | DOWN, |
84 | TOWARD_ZERO, |
85 | UP, |
86 | }; |
87 | |
88 | static constexpr WordType WORDTYPE_MAX = ~WordType(0); |
89 | |
90 | private: |
91 | /// This union is used to store the integer value. When the |
92 | /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. |
93 | union { |
94 | uint64_t VAL; ///< Used to store the <= 64 bits integer value. |
95 | uint64_t *pVal; ///< Used to store the >64 bits integer value. |
96 | } U; |
97 | |
98 | unsigned BitWidth; ///< The number of bits in this APInt. |
99 | |
100 | friend struct DenseMapInfo<APInt>; |
101 | |
102 | friend class APSInt; |
103 | |
104 | /// Fast internal constructor |
105 | /// |
106 | /// This constructor is used only internally for speed of construction of |
107 | /// temporaries. It is unsafe for general use so it is not public. |
108 | APInt(uint64_t *val, unsigned bits) : BitWidth(bits) { |
109 | U.pVal = val; |
110 | } |
111 | |
112 | /// Determine if this APInt just has one word to store value. |
113 | /// |
114 | /// \returns true if the number of bits <= 64, false otherwise. |
115 | bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; } |
116 | |
117 | /// Determine which word a bit is in. |
118 | /// |
119 | /// \returns the word position for the specified bit position. |
120 | static unsigned whichWord(unsigned bitPosition) { |
121 | return bitPosition / APINT_BITS_PER_WORD; |
122 | } |
123 | |
124 | /// Determine which bit in a word a bit is in. |
125 | /// |
126 | /// \returns the bit position in a word for the specified bit position |
127 | /// in the APInt. |
128 | static unsigned whichBit(unsigned bitPosition) { |
129 | return bitPosition % APINT_BITS_PER_WORD; |
130 | } |
131 | |
132 | /// Get a single bit mask. |
133 | /// |
134 | /// \returns a uint64_t with only the bit at "whichBit(bitPosition)" set.
135 | /// This method generates and returns a uint64_t (word) mask for a single |
136 | /// bit at a specific bit position. This is used to mask the bit in the |
137 | /// corresponding word. |
138 | static uint64_t maskBit(unsigned bitPosition) { |
139 | return 1ULL << whichBit(bitPosition); |
140 | } |
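// Worked example (illustrative): with 64-bit words, bit position 70 lives in
// word whichWord(70) == 1 at in-word offset whichBit(70) == 6, so
// maskBit(70) == 1ULL << 6 selects exactly that bit within its word.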
141 | |
142 | /// Clear unused high order bits |
143 | /// |
144 | /// This method is used internally to clear the top "N" bits in the high order |
145 | /// word that are not used by the APInt. This is needed after the most |
146 | /// significant word is assigned a value to ensure that those bits are |
147 | /// zero'd out. |
148 | APInt &clearUnusedBits() { |
149 | // Compute how many bits are used in the final word |
150 | unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1; |
151 | |
152 | // Mask out the high bits. |
153 | uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits); |
154 | if (isSingleWord()) |
155 | U.VAL &= mask; |
156 | else |
157 | U.pVal[getNumWords() - 1] &= mask; |
158 | return *this; |
159 | } |
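// Worked example (illustrative): for a 7-bit APInt, WordBits == 7 and
// mask == WORDTYPE_MAX >> (64 - 7) == 0x7F, so a word holding 0xFF is
// canonicalized back to 0x7F.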
160 | |
161 | /// Get the word corresponding to a bit position |
162 | /// \returns the corresponding word for the specified bit position. |
163 | uint64_t getWord(unsigned bitPosition) const { |
164 | return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)]; |
165 | } |
166 | |
167 | /// Utility method to change the bit width of this APInt to new bit width, |
168 | /// allocating and/or deallocating as necessary. There is no guarantee on the |
169 | /// value of any bits upon return. Caller should populate the bits after. |
170 | void reallocate(unsigned NewBitWidth); |
171 | |
172 | /// Convert a char array into an APInt |
173 | /// |
174 | /// \param radix 2, 8, 10, 16, or 36 |
175 | /// Converts a string into a number. The string must be non-empty |
176 | /// and well-formed as a number of the given base. The bit-width |
177 | /// must be sufficient to hold the result. |
178 | /// |
179 | /// This is used by the constructors that take string arguments. |
180 | /// |
181 | /// StringRef::getAsInteger is superficially similar but (1) does |
182 | /// not assume that the string is well-formed and (2) grows the |
183 | /// result to hold the input. |
184 | void fromString(unsigned numBits, StringRef str, uint8_t radix); |
185 | |
186 | /// An internal division function for dividing APInts. |
187 | /// |
188 | /// This is used by the toString method to divide by the radix. It simply |
189 | /// provides a more convenient form of divide for internal use since KnuthDiv |
190 | /// has specific constraints on its inputs. If those constraints are not met |
191 | /// then it provides a simpler form of divide. |
192 | static void divide(const WordType *LHS, unsigned lhsWords, |
193 | const WordType *RHS, unsigned rhsWords, WordType *Quotient, |
194 | WordType *Remainder); |
195 | |
196 | /// out-of-line slow case for inline constructor |
197 | void initSlowCase(uint64_t val, bool isSigned); |
198 | |
199 | /// shared code between two array constructors |
200 | void initFromArray(ArrayRef<uint64_t> array); |
201 | |
202 | /// out-of-line slow case for inline copy constructor |
203 | void initSlowCase(const APInt &that); |
204 | |
205 | /// out-of-line slow case for shl |
206 | void shlSlowCase(unsigned ShiftAmt); |
207 | |
208 | /// out-of-line slow case for lshr. |
209 | void lshrSlowCase(unsigned ShiftAmt); |
210 | |
211 | /// out-of-line slow case for ashr. |
212 | void ashrSlowCase(unsigned ShiftAmt); |
213 | |
214 | /// out-of-line slow case for operator= |
215 | void AssignSlowCase(const APInt &RHS); |
216 | |
217 | /// out-of-line slow case for operator== |
218 | bool EqualSlowCase(const APInt &RHS) const LLVM_READONLY;
219 | |
220 | /// out-of-line slow case for countLeadingZeros |
221 | unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
222 | |
223 | /// out-of-line slow case for countLeadingOnes. |
224 | unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
225 | |
226 | /// out-of-line slow case for countTrailingZeros. |
227 | unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
228 | |
229 | /// out-of-line slow case for countTrailingOnes |
230 | unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
231 | |
232 | /// out-of-line slow case for countPopulation |
233 | unsigned countPopulationSlowCase() const LLVM_READONLY;
234 | |
235 | /// out-of-line slow case for intersects. |
236 | bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
237 | |
238 | /// out-of-line slow case for isSubsetOf. |
239 | bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
240 | |
241 | /// out-of-line slow case for setBits. |
242 | void setBitsSlowCase(unsigned loBit, unsigned hiBit); |
243 | |
244 | /// out-of-line slow case for flipAllBits. |
245 | void flipAllBitsSlowCase(); |
246 | |
247 | /// out-of-line slow case for operator&=. |
248 | void AndAssignSlowCase(const APInt& RHS); |
249 | |
250 | /// out-of-line slow case for operator|=. |
251 | void OrAssignSlowCase(const APInt& RHS); |
252 | |
253 | /// out-of-line slow case for operator^=. |
254 | void XorAssignSlowCase(const APInt& RHS); |
255 | |
256 | /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal |
257 | /// to, or greater than RHS. |
258 | int compare(const APInt &RHS) const LLVM_READONLY;
259 | |
260 | /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal |
261 | /// to, or greater than RHS. |
262 | int compareSigned(const APInt &RHS) const LLVM_READONLY;
263 | |
264 | public: |
265 | /// \name Constructors |
266 | /// @{ |
267 | |
268 | /// Create a new APInt of numBits width, initialized as val. |
269 | /// |
270 | /// If isSigned is true then val is treated as if it were a signed value |
271 | /// (i.e. as an int64_t) and the appropriate sign extension to the bit width |
272 | /// will be done. Otherwise, no sign extension occurs (high order bits beyond |
273 | /// the range of val are zero filled). |
274 | /// |
275 | /// \param numBits the bit width of the constructed APInt |
276 | /// \param val the initial value of the APInt |
277 | /// \param isSigned how to treat signedness of val |
278 | APInt(unsigned numBits, uint64_t val, bool isSigned = false) |
279 | : BitWidth(numBits) { |
280 | assert(BitWidth && "bitwidth too small");
281 | if (isSingleWord()) { |
282 | U.VAL = val; |
283 | clearUnusedBits(); |
284 | } else { |
285 | initSlowCase(val, isSigned); |
286 | } |
287 | } |
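// Worked examples (illustrative):
//   APInt(16, 5)                     // 0x0005
//   APInt(16, -5, /*isSigned=*/true) // 0xFFFB: the sign-extended bit
//                                    // pattern, trimmed to 16 bits by
//                                    // clearUnusedBits()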
288 | |
289 | /// Construct an APInt of numBits width, initialized as bigVal[]. |
290 | /// |
291 | /// Note that bigVal.size() can be smaller or larger than the corresponding |
292 | /// bit width but any extraneous bits will be dropped. |
293 | /// |
294 | /// \param numBits the bit width of the constructed APInt |
295 | /// \param bigVal a sequence of words to form the initial value of the APInt |
296 | APInt(unsigned numBits, ArrayRef<uint64_t> bigVal); |
297 | |
298 | /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but |
299 | /// deprecated because this constructor is prone to ambiguity with the |
300 | /// APInt(unsigned, uint64_t, bool) constructor. |
301 | /// |
302 | /// If this overload is ever deleted, care should be taken to prevent calls |
303 | /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool) |
304 | /// constructor. |
305 | APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]); |
306 | |
307 | /// Construct an APInt from a string representation. |
308 | /// |
309 | /// This constructor interprets the string \p str in the given radix. The |
310 | /// interpretation stops when the first character that is not suitable for the |
311 | /// radix is encountered, or the end of the string. Acceptable radix values |
312 | /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the |
313 | /// string to require more bits than numBits. |
314 | /// |
315 | /// \param numBits the bit width of the constructed APInt |
316 | /// \param str the string to be interpreted |
317 | /// \param radix the radix to use for the conversion |
318 | APInt(unsigned numBits, StringRef str, uint8_t radix); |
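// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Parsing strings in a given radix;
// a leading '-' yields the two's-complement encoding:
static void stringCtorExample() {
  llvm::APInt Hex(16, "ff", 16);  // 255
  llvm::APInt Neg(16, "-10", 10); // 0xFFF6, i.e. -10 as a signed 16-bit value
  assert(Hex == 255 && Neg.getSExtValue() == -10);
}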
319 | |
320 | /// Copy constructor. |
321 | /// Simply makes *this a copy of that. |
322 | APInt(const APInt &that) : BitWidth(that.BitWidth) { |
323 | if (isSingleWord()) |
324 | U.VAL = that.U.VAL; |
325 | else |
326 | initSlowCase(that); |
327 | } |
328 | |
329 | /// Move Constructor. |
330 | APInt(APInt &&that) : BitWidth(that.BitWidth) { |
331 | memcpy(&U, &that.U, sizeof(U)); |
332 | that.BitWidth = 0; |
333 | } |
334 | |
335 | /// Destructor. |
336 | ~APInt() { |
337 | if (needsCleanup()) |
338 | delete[] U.pVal; |
339 | } |
340 | |
341 | /// Default constructor that creates an uninteresting APInt |
342 | /// representing a 1-bit zero value. |
343 | /// |
344 | /// This is useful for object deserialization (pair this with the static |
345 | /// method Read). |
346 | explicit APInt() : BitWidth(1) { U.VAL = 0; } |
347 | |
348 | /// Returns whether this instance allocated memory. |
349 | bool needsCleanup() const { return !isSingleWord(); } |
350 | |
351 | /// Used to insert APInt objects, or objects that contain APInt objects, into |
352 | /// FoldingSets. |
353 | void Profile(FoldingSetNodeID &id) const; |
354 | |
355 | /// @} |
356 | /// \name Value Tests |
357 | /// @{ |
358 | |
359 | /// Determine sign of this APInt. |
360 | /// |
361 | /// This tests the high bit of this APInt to determine if it is set. |
362 | /// |
363 | /// \returns true if this APInt is negative, false otherwise |
364 | bool isNegative() const { return (*this)[BitWidth - 1]; } |
365 | |
366 | /// Determine if this APInt Value is non-negative (>= 0) |
367 | /// |
368 | /// This tests the high bit of the APInt to determine if it is unset. |
369 | bool isNonNegative() const { return !isNegative(); } |
370 | |
371 | /// Determine if sign bit of this APInt is set. |
372 | /// |
373 | /// This tests the high bit of this APInt to determine if it is set. |
374 | /// |
375 | /// \returns true if this APInt has its sign bit set, false otherwise. |
376 | bool isSignBitSet() const { return (*this)[BitWidth-1]; } |
377 | |
378 | /// Determine if sign bit of this APInt is clear. |
379 | /// |
380 | /// This tests the high bit of this APInt to determine if it is clear. |
381 | /// |
382 | /// \returns true if this APInt has its sign bit clear, false otherwise. |
383 | bool isSignBitClear() const { return !isSignBitSet(); } |
384 | |
385 | /// Determine if this APInt Value is positive. |
386 | /// |
387 | /// This tests if the value of this APInt is positive (> 0). Note |
388 | /// that 0 is not a positive value. |
389 | /// |
390 | /// \returns true if this APInt is positive. |
391 | bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); } |
392 | |
393 | /// Determine if this APInt Value is non-positive (<= 0). |
394 | /// |
395 | /// \returns true if this APInt is non-positive. |
396 | bool isNonPositive() const { return !isStrictlyPositive(); } |
397 | |
398 | /// Determine if all bits are set |
399 | /// |
400 | /// This checks to see if all bits of the APInt are set or not. |
401 | bool isAllOnesValue() const { |
402 | if (isSingleWord()) |
403 | return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth); |
404 | return countTrailingOnesSlowCase() == BitWidth; |
405 | } |
406 | |
407 | /// Determine if all bits are clear |
408 | /// |
409 | /// This checks to see if all bits of the APInt are clear or |
410 | /// not. |
411 | bool isNullValue() const { return !*this; } |
412 | |
413 | /// Determine if this is a value of 1. |
414 | /// |
415 | /// This checks to see if the value of this APInt is one. |
416 | bool isOneValue() const { |
417 | if (isSingleWord()) |
418 | return U.VAL == 1; |
419 | return countLeadingZerosSlowCase() == BitWidth - 1; |
420 | } |
421 | |
422 | /// Determine if this is the largest unsigned value. |
423 | /// |
424 | /// This checks to see if the value of this APInt is the maximum unsigned |
425 | /// value for the APInt's bit width. |
426 | bool isMaxValue() const { return isAllOnesValue(); } |
427 | |
428 | /// Determine if this is the largest signed value. |
429 | /// |
430 | /// This checks to see if the value of this APInt is the maximum signed |
431 | /// value for the APInt's bit width. |
432 | bool isMaxSignedValue() const { |
433 | if (isSingleWord()) |
434 | return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1); |
435 | return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1; |
436 | } |
437 | |
438 | /// Determine if this is the smallest unsigned value. |
439 | /// |
440 | /// This checks to see if the value of this APInt is the minimum unsigned |
441 | /// value for the APInt's bit width. |
442 | bool isMinValue() const { return isNullValue(); } |
443 | |
444 | /// Determine if this is the smallest signed value. |
445 | /// |
446 | /// This checks to see if the value of this APInt is the minimum signed |
447 | /// value for the APInt's bit width. |
448 | bool isMinSignedValue() const { |
449 | if (isSingleWord()) |
450 | return U.VAL == (WordType(1) << (BitWidth - 1)); |
451 | return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1; |
452 | } |
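// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] The signed extrema round-trip
// through the generators declared in the Value Generators section below:
static void signedExtremaExample() {
  assert(llvm::APInt::getSignedMaxValue(8).isMaxSignedValue()); // 0x7F == 127
  assert(llvm::APInt::getSignedMinValue(8).isMinSignedValue()); // 0x80 == -128
}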
453 | |
454 | /// Check if this APInt has an N-bits unsigned integer value. |
455 | bool isIntN(unsigned N) const { |
456 | assert(N && "N == 0 ???"); |
457 | return getActiveBits() <= N; |
458 | } |
459 | |
460 | /// Check if this APInt has an N-bits signed integer value. |
461 | bool isSignedIntN(unsigned N) const { |
462 | assert(N && "N == 0 ???"); |
463 | return getMinSignedBits() <= N; |
464 | } |
465 | |
466 | /// Check if this APInt's value is a power of two greater than zero. |
467 | /// |
468 | /// \returns true if the argument APInt value is a power of two > 0. |
469 | bool isPowerOf2() const { |
470 | if (isSingleWord()) |
471 | return isPowerOf2_64(U.VAL); |
472 | return countPopulationSlowCase() == 1; |
473 | } |
474 | |
475 | /// Check if the APInt's value is returned by getSignMask. |
476 | /// |
477 | /// \returns true if this is the value returned by getSignMask. |
478 | bool isSignMask() const { return isMinSignedValue(); } |
479 | |
480 | /// Convert APInt to a boolean value. |
481 | /// |
482 | /// This converts the APInt to a boolean value as a test against zero. |
483 | bool getBoolValue() const { return !!*this; } |
484 | |
485 | /// If this value is smaller than the specified limit, return it, otherwise |
486 | /// return the limit value. This causes the value to saturate to the limit. |
487 | uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const { |
488 | return ugt(Limit) ? Limit : getZExtValue(); |
489 | } |
490 | |
491 | /// Check if the APInt consists of a repeated bit pattern. |
492 | /// |
493 | /// e.g. 0x01010101 satisfies isSplat(8). |
494 | /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit |
495 | /// width without remainder. |
496 | bool isSplat(unsigned SplatSizeInBits) const; |
497 | |
498 | /// \returns true if this APInt value is a sequence of \p numBits ones |
499 | /// starting at the least significant bit with the remainder zero. |
500 | bool isMask(unsigned numBits) const { |
501 | assert(numBits != 0 && "numBits must be non-zero"); |
502 | assert(numBits <= BitWidth && "numBits out of range"); |
503 | if (isSingleWord()) |
504 | return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits)); |
505 | unsigned Ones = countTrailingOnesSlowCase(); |
506 | return (numBits == Ones) && |
507 | ((Ones + countLeadingZerosSlowCase()) == BitWidth); |
508 | } |
509 | |
510 | /// \returns true if this APInt is a non-empty sequence of ones starting at |
511 | /// the least significant bit with the remainder zero. |
512 | /// Ex. isMask(0x0000FFFFU) == true. |
513 | bool isMask() const { |
514 | if (isSingleWord()) |
515 | return isMask_64(U.VAL); |
516 | unsigned Ones = countTrailingOnesSlowCase(); |
517 | return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth); |
518 | } |
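// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] The two isMask overloads:
static void maskExample() {
  assert(llvm::APInt(32, 0x0000FFFF).isMask());   // contiguous low ones
  assert(llvm::APInt(32, 0x0000FFFF).isMask(16)); // exactly 16 of them
  assert(!llvm::APInt(32, 0x0000FF00).isMask());  // does not start at bit 0
}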
519 | |
520 | /// Return true if this APInt value contains a sequence of ones with |
521 | /// the remainder zero. |
522 | bool isShiftedMask() const { |
523 | if (isSingleWord()) |
524 | return isShiftedMask_64(U.VAL); |
525 | unsigned Ones = countPopulationSlowCase(); |
526 | unsigned LeadZ = countLeadingZerosSlowCase(); |
527 | return (Ones + LeadZ + countTrailingZeros()) == BitWidth; |
528 | } |
529 | |
530 | /// @} |
531 | /// \name Value Generators |
532 | /// @{ |
533 | |
534 | /// Gets maximum unsigned value of APInt for specific bit width. |
535 | static APInt getMaxValue(unsigned numBits) { |
536 | return getAllOnesValue(numBits); |
537 | } |
538 | |
539 | /// Gets maximum signed value of APInt for a specific bit width. |
540 | static APInt getSignedMaxValue(unsigned numBits) { |
541 | APInt API = getAllOnesValue(numBits); |
542 | API.clearBit(numBits - 1); |
543 | return API; |
544 | } |
545 | |
546 | /// Gets minimum unsigned value of APInt for a specific bit width. |
547 | static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); } |
548 | |
549 | /// Gets minimum signed value of APInt for a specific bit width. |
550 | static APInt getSignedMinValue(unsigned numBits) { |
551 | APInt API(numBits, 0); |
552 | API.setBit(numBits - 1); |
553 | return API; |
554 | } |
555 | |
556 | /// Get the SignMask for a specific bit width. |
557 | /// |
558 | /// This is just a wrapper function of getSignedMinValue(), and it helps code |
559 | /// readability when we want to get a SignMask. |
560 | static APInt getSignMask(unsigned BitWidth) { |
561 | return getSignedMinValue(BitWidth); |
562 | } |
563 | |
564 | /// Get the all-ones value. |
565 | /// |
566 | /// \returns the all-ones value for an APInt of the specified bit-width. |
567 | static APInt getAllOnesValue(unsigned numBits) { |
568 | return APInt(numBits, WORDTYPE_MAX, true); |
569 | } |
570 | |
571 | /// Get the '0' value. |
572 | /// |
573 | /// \returns the '0' value for an APInt of the specified bit-width. |
574 | static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); } |
575 | |
576 | /// Compute an APInt containing numBits high bits from this APInt. |
577 | /// |
578 | /// Get an APInt with the same BitWidth as this APInt, just zero mask |
579 | /// the low bits and right shift to the least significant bit. |
580 | /// |
581 | /// \returns the high "numBits" bits of this APInt. |
582 | APInt getHiBits(unsigned numBits) const; |
583 | |
584 | /// Compute an APInt containing numBits low bits from this APInt. |
585 | /// |
586 | /// Get an APInt with the same BitWidth as this APInt, just zero mask |
587 | /// the high bits. |
588 | /// |
589 | /// \returns the low "numBits" bits of this APInt. |
590 | APInt getLoBits(unsigned numBits) const; |
591 | |
592 | /// Return an APInt with exactly one bit set in the result. |
593 | static APInt getOneBitSet(unsigned numBits, unsigned BitNo) { |
594 | APInt Res(numBits, 0); |
595 | Res.setBit(BitNo); |
596 | return Res; |
597 | } |
598 | |
599 | /// Get a value with a block of bits set. |
600 | /// |
601 | /// Constructs an APInt value that has a contiguous range of bits set. The |
602 | /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other |
603 | /// bits will be zero. For example, with parameters (32, 0, 16) you would get |
604 | /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than |
605 | /// \p hiBit. |
606 | /// |
607 | /// \param numBits the intended bit width of the result |
608 | /// \param loBit the index of the lowest bit set. |
609 | /// \param hiBit the index of the highest bit set. |
610 | /// |
611 | /// \returns An APInt value with the requested bits set. |
612 | static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) { |
613 | assert(loBit <= hiBit && "loBit greater than hiBit"); |
614 | APInt Res(numBits, 0); |
615 | Res.setBits(loBit, hiBit); |
616 | return Res; |
617 | } |
618 | |
619 | /// Wrap version of getBitsSet. |
620 | /// If \p hiBit is bigger than \p loBit, this is the same as getBitsSet. |
621 | /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example, |
622 | /// with parameters (32, 28, 4), you would get 0xF000000F. |
623 | /// If \p hiBit is equal to \p loBit, you would get a result with all bits |
624 | /// set. |
625 | static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, |
626 | unsigned hiBit) { |
627 | APInt Res(numBits, 0); |
628 | Res.setBitsWithWrap(loBit, hiBit); |
629 | return Res; |
630 | } |
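// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] The plain and wrapping range
// generators, matching the examples in the comments above:
static void bitRangeExample() {
  assert(llvm::APInt::getBitsSet(32, 0, 16) == 0x0000FFFF);
  assert(llvm::APInt::getBitsSetWithWrap(32, 28, 4) == 0xF000000F);
}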
631 | |
632 | /// Get a value with upper bits starting at loBit set. |
633 | /// |
634 | /// Constructs an APInt value that has a contiguous range of bits set. The |
635 | /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other |
636 | /// bits will be zero. For example, with parameters (32, 12) you would get |
637 | /// 0xFFFFF000. |
638 | /// |
639 | /// \param numBits the intended bit width of the result |
640 | /// \param loBit the index of the lowest bit to set. |
641 | /// |
642 | /// \returns An APInt value with the requested bits set. |
643 | static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) { |
644 | APInt Res(numBits, 0); |
645 | Res.setBitsFrom(loBit); |
646 | return Res; |
647 | } |
648 | |
649 | /// Get a value with high bits set |
650 | /// |
651 | /// Constructs an APInt value that has the top hiBitsSet bits set. |
652 | /// |
653 | /// \param numBits the bitwidth of the result |
654 | /// \param hiBitsSet the number of high-order bits set in the result. |
655 | static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) { |
656 | APInt Res(numBits, 0); |
657 | Res.setHighBits(hiBitsSet); |
658 | return Res; |
659 | } |
660 | |
661 | /// Get a value with low bits set |
662 | /// |
663 | /// Constructs an APInt value that has the bottom loBitsSet bits set. |
664 | /// |
665 | /// \param numBits the bitwidth of the result |
666 | /// \param loBitsSet the number of low-order bits set in the result. |
667 | static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) { |
668 | APInt Res(numBits, 0); |
669 | Res.setLowBits(loBitsSet); |
670 | return Res; |
671 | } |
672 | |
673 | /// Return a value containing V broadcasted over NewLen bits. |
674 | static APInt getSplat(unsigned NewLen, const APInt &V); |
675 | |
676 | /// Determine if two APInts have the same value, after zero-extending |
677 | /// one of them (if needed!) to ensure that the bit-widths match. |
678 | static bool isSameValue(const APInt &I1, const APInt &I2) { |
679 | if (I1.getBitWidth() == I2.getBitWidth()) |
680 | return I1 == I2; |
681 | |
682 | if (I1.getBitWidth() > I2.getBitWidth()) |
683 | return I1 == I2.zext(I1.getBitWidth()); |
684 | |
685 | return I1.zext(I2.getBitWidth()) == I2; |
686 | } |
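// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] operator== asserts on mismatched
// bit widths; isSameValue zero-extends the narrower operand instead:
static void sameValueExample() {
  llvm::APInt Narrow(8, 42), Wide(32, 42);
  assert(llvm::APInt::isSameValue(Narrow, Wide));
}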
687 | |
688 | /// Overload to compute a hash_code for an APInt value. |
689 | friend hash_code hash_value(const APInt &Arg); |
690 | |
691 | /// This function returns a pointer to the internal storage of the APInt. |
692 | /// This is useful for writing out the APInt in binary form without any |
693 | /// conversions. |
694 | const uint64_t *getRawData() const { |
695 | if (isSingleWord()) |
696 | return &U.VAL; |
697 | return &U.pVal[0]; |
698 | } |
699 | |
700 | /// @} |
701 | /// \name Unary Operators |
702 | /// @{ |
703 | |
704 | /// Postfix increment operator. |
705 | /// |
706 | /// Increments *this by 1. |
707 | /// |
708 | /// \returns a new APInt value representing the original value of *this. |
709 | const APInt operator++(int) { |
710 | APInt API(*this); |
711 | ++(*this); |
712 | return API; |
713 | } |
714 | |
715 | /// Prefix increment operator. |
716 | /// |
717 | /// \returns *this incremented by one |
718 | APInt &operator++(); |
719 | |
720 | /// Postfix decrement operator. |
721 | /// |
722 | /// Decrements *this by 1. |
723 | /// |
724 | /// \returns a new APInt value representing the original value of *this. |
725 | const APInt operator--(int) { |
726 | APInt API(*this); |
727 | --(*this); |
728 | return API; |
729 | } |
730 | |
731 | /// Prefix decrement operator. |
732 | /// |
733 | /// \returns *this decremented by one. |
734 | APInt &operator--(); |
735 | |
736 | /// Logical negation operator. |
737 | /// |
738 | /// Performs logical negation operation on this APInt. |
739 | /// |
740 | /// \returns true if *this is zero, false otherwise. |
741 | bool operator!() const { |
742 | if (isSingleWord()) |
743 | return U.VAL == 0; |
744 | return countLeadingZerosSlowCase() == BitWidth; |
745 | } |
746 | |
747 | /// @} |
748 | /// \name Assignment Operators |
749 | /// @{ |
750 | |
751 | /// Copy assignment operator. |
752 | /// |
753 | /// \returns *this after assignment of RHS. |
754 | APInt &operator=(const APInt &RHS) { |
755 | // If the bitwidths are the same, we can avoid mucking with memory |
756 | if (isSingleWord() && RHS.isSingleWord()) { |
757 | U.VAL = RHS.U.VAL; |
758 | BitWidth = RHS.BitWidth; |
759 | return clearUnusedBits(); |
760 | } |
761 | |
762 | AssignSlowCase(RHS); |
763 | return *this; |
764 | } |
765 | |
766 | /// Move assignment operator. |
767 | APInt &operator=(APInt &&that) { |
768 | #ifdef EXPENSIVE_CHECKS |
769 | // Some std::shuffle implementations still do self-assignment. |
770 | if (this == &that) |
771 | return *this; |
772 | #endif |
773 | assert(this != &that && "Self-move not supported"); |
774 | if (!isSingleWord()) |
775 | delete[] U.pVal; |
776 | |
777 | // Use memcpy so that type based alias analysis sees both VAL and pVal |
778 | // as modified. |
779 | memcpy(&U, &that.U, sizeof(U)); |
780 | |
781 | BitWidth = that.BitWidth; |
782 | that.BitWidth = 0; |
783 | |
784 | return *this; |
785 | } |
786 | |
787 | /// Assignment operator. |
788 | /// |
789 | /// The RHS value is assigned to *this. If the significant bits in RHS exceed |
790 | /// the bit width, the excess bits are truncated. If the bit width is larger |
791 | /// than 64, the value is zero filled in the unspecified high order bits. |
792 | /// |
793 | /// \returns *this after assignment of RHS value. |
794 | APInt &operator=(uint64_t RHS) { |
795 | if (isSingleWord()) { |
796 | U.VAL = RHS; |
797 | return clearUnusedBits(); |
798 | } |
799 | U.pVal[0] = RHS; |
800 | memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); |
801 | return *this; |
802 | } |
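// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Assigning a uint64_t truncates any
// bits beyond the existing width, as documented above:
static void assignTruncationExample() {
  llvm::APInt X(8, 0);
  X = 0x1FF;         // only the low 8 bits survive
  assert(X == 0xFF);
}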
803 | |
804 | /// Bitwise AND assignment operator. |
805 | /// |
806 | /// Performs a bitwise AND operation on this APInt and RHS. The result is |
807 | /// assigned to *this. |
808 | /// |
809 | /// \returns *this after ANDing with RHS. |
810 | APInt &operator&=(const APInt &RHS) { |
811 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
812 | if (isSingleWord()) |
813 | U.VAL &= RHS.U.VAL; |
814 | else |
815 | AndAssignSlowCase(RHS); |
816 | return *this; |
817 | } |
818 | |
819 | /// Bitwise AND assignment operator. |
820 | /// |
821 | /// Performs a bitwise AND operation on this APInt and RHS. RHS is |
822 | /// logically zero-extended or truncated to match the bit-width of |
823 | /// the LHS. |
824 | APInt &operator&=(uint64_t RHS) { |
825 | if (isSingleWord()) { |
826 | U.VAL &= RHS; |
827 | return *this; |
828 | } |
829 | U.pVal[0] &= RHS; |
830 | memset(U.pVal+1, 0, (getNumWords() - 1) * APINT_WORD_SIZE); |
831 | return *this; |
832 | } |
833 | |
834 | /// Bitwise OR assignment operator. |
835 | /// |
836 | /// Performs a bitwise OR operation on this APInt and RHS. The result is |
837 | /// assigned to *this. |
838 | /// |
839 | /// \returns *this after ORing with RHS. |
840 | APInt &operator|=(const APInt &RHS) { |
841 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
842 | if (isSingleWord()) |
843 | U.VAL |= RHS.U.VAL; |
844 | else |
845 | OrAssignSlowCase(RHS); |
846 | return *this; |
847 | } |
848 | |
849 | /// Bitwise OR assignment operator. |
850 | /// |
851 | /// Performs a bitwise OR operation on this APInt and RHS. RHS is |
852 | /// logically zero-extended or truncated to match the bit-width of |
853 | /// the LHS. |
854 | APInt &operator|=(uint64_t RHS) { |
855 | if (isSingleWord()) { |
856 | U.VAL |= RHS; |
857 | return clearUnusedBits(); |
858 | } |
859 | U.pVal[0] |= RHS; |
860 | return *this; |
861 | } |
862 | |
863 | /// Bitwise XOR assignment operator. |
864 | /// |
865 | /// Performs a bitwise XOR operation on this APInt and RHS. The result is |
866 | /// assigned to *this. |
867 | /// |
868 | /// \returns *this after XORing with RHS. |
869 | APInt &operator^=(const APInt &RHS) { |
870 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
871 | if (isSingleWord()) |
872 | U.VAL ^= RHS.U.VAL; |
873 | else |
874 | XorAssignSlowCase(RHS); |
875 | return *this; |
876 | } |
877 | |
878 | /// Bitwise XOR assignment operator. |
879 | /// |
880 | /// Performs a bitwise XOR operation on this APInt and RHS. RHS is |
881 | /// logically zero-extended or truncated to match the bit-width of |
882 | /// the LHS. |
883 | APInt &operator^=(uint64_t RHS) { |
884 | if (isSingleWord()) { |
885 | U.VAL ^= RHS; |
886 | return clearUnusedBits(); |
887 | } |
888 | U.pVal[0] ^= RHS; |
889 | return *this; |
890 | } |
891 | |
892 | /// Multiplication assignment operator. |
893 | /// |
894 | /// Multiplies this APInt by RHS and assigns the result to *this. |
895 | /// |
896 | /// \returns *this |
897 | APInt &operator*=(const APInt &RHS); |
898 | APInt &operator*=(uint64_t RHS); |
899 | |
900 | /// Addition assignment operator. |
901 | /// |
902 | /// Adds RHS to *this and assigns the result to *this. |
903 | /// |
904 | /// \returns *this |
905 | APInt &operator+=(const APInt &RHS); |
906 | APInt &operator+=(uint64_t RHS); |
907 | |
908 | /// Subtraction assignment operator. |
909 | /// |
910 | /// Subtracts RHS from *this and assigns the result to *this. |
911 | /// |
912 | /// \returns *this |
913 | APInt &operator-=(const APInt &RHS); |
914 | APInt &operator-=(uint64_t RHS); |
915 | |
916 | /// Left-shift assignment function. |
917 | /// |
918 | /// Shifts *this left by shiftAmt and assigns the result to *this. |
919 | /// |
920 | /// \returns *this after shifting left by ShiftAmt |
921 | APInt &operator<<=(unsigned ShiftAmt) { |
922 | assert(ShiftAmt <= BitWidth && "Invalid shift amount"); |
923 | if (isSingleWord()) { |
924 | if (ShiftAmt == BitWidth) |
925 | U.VAL = 0; |
926 | else |
927 | U.VAL <<= ShiftAmt; |
928 | return clearUnusedBits(); |
929 | } |
930 | shlSlowCase(ShiftAmt); |
931 | return *this; |
932 | } |
933 | |
934 | /// Left-shift assignment function. |
935 | /// |
936 | /// Shifts *this left by shiftAmt and assigns the result to *this. |
937 | /// |
938 | /// \returns *this after shifting left by ShiftAmt |
939 | APInt &operator<<=(const APInt &ShiftAmt); |
940 | |
941 | /// @} |
942 | /// \name Binary Operators |
943 | /// @{ |
944 | |
945 | /// Multiplication operator. |
946 | /// |
947 | /// Multiplies this APInt by RHS and returns the result. |
948 | APInt operator*(const APInt &RHS) const; |
949 | |
950 | /// Left logical shift operator. |
951 | /// |
952 | /// Shifts this APInt left by \p Bits and returns the result. |
953 | APInt operator<<(unsigned Bits) const { return shl(Bits); } |
954 | |
955 | /// Left logical shift operator. |
956 | /// |
957 | /// Shifts this APInt left by \p Bits and returns the result. |
958 | APInt operator<<(const APInt &Bits) const { return shl(Bits); } |
959 | |
960 | /// Arithmetic right-shift function. |
961 | /// |
962 | /// Arithmetic right-shift this APInt by shiftAmt. |
963 | APInt ashr(unsigned ShiftAmt) const { |
964 | APInt R(*this); |
965 | R.ashrInPlace(ShiftAmt); |
966 | return R; |
967 | } |
968 | |
969 | /// Arithmetic right-shift this APInt by ShiftAmt in place. |
970 | void ashrInPlace(unsigned ShiftAmt) { |
971 | assert(ShiftAmt <= BitWidth && "Invalid shift amount"); |
972 | if (isSingleWord()) { |
973 | int64_t SExtVAL = SignExtend64(U.VAL, BitWidth); |
974 | if (ShiftAmt == BitWidth) |
975 | U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit. |
976 | else |
977 | U.VAL = SExtVAL >> ShiftAmt; |
978 | clearUnusedBits(); |
979 | return; |
980 | } |
981 | ashrSlowCase(ShiftAmt); |
982 | } |
983 | |
984 | /// Logical right-shift function. |
985 | /// |
986 | /// Logical right-shift this APInt by shiftAmt. |
987 | APInt lshr(unsigned shiftAmt) const { |
988 | APInt R(*this); |
989 | R.lshrInPlace(shiftAmt); |
990 | return R; |
991 | } |
992 | |
993 | /// Logical right-shift this APInt by ShiftAmt in place. |
994 | void lshrInPlace(unsigned ShiftAmt) { |
995 | assert(ShiftAmt <= BitWidth && "Invalid shift amount"); |
996 | if (isSingleWord()) { |
997 | if (ShiftAmt == BitWidth) |
998 | U.VAL = 0; |
999 | else |
1000 | U.VAL >>= ShiftAmt; |
1001 | return; |
1002 | } |
1003 | lshrSlowCase(ShiftAmt); |
1004 | } |
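// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Logical versus arithmetic right
// shift on a value with the sign bit set:
static void shiftExample() {
  llvm::APInt V(8, 0x80);
  assert(V.lshr(4) == 0x08); // zero fill from the left
  assert(V.ashr(4) == 0xF8); // sign-bit fill from the left
}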
1005 | |
1006 | /// Left-shift function. |
1007 | /// |
1008 | /// Left-shift this APInt by shiftAmt. |
1009 | APInt shl(unsigned shiftAmt) const { |
1010 | APInt R(*this); |
1011 | R <<= shiftAmt; |
1012 | return R; |
1013 | } |
1014 | |
1015 | /// Rotate left by rotateAmt. |
1016 | APInt rotl(unsigned rotateAmt) const; |
1017 | |
1018 | /// Rotate right by rotateAmt. |
1019 | APInt rotr(unsigned rotateAmt) const; |
1020 | |
1021 | /// Arithmetic right-shift function. |
1022 | /// |
1023 | /// Arithmetic right-shift this APInt by shiftAmt. |
1024 | APInt ashr(const APInt &ShiftAmt) const { |
1025 | APInt R(*this); |
1026 | R.ashrInPlace(ShiftAmt); |
1027 | return R; |
1028 | } |
1029 | |
1030 | /// Arithmetic right-shift this APInt by shiftAmt in place. |
1031 | void ashrInPlace(const APInt &shiftAmt); |
1032 | |
1033 | /// Logical right-shift function. |
1034 | /// |
1035 | /// Logical right-shift this APInt by shiftAmt. |
1036 | APInt lshr(const APInt &ShiftAmt) const { |
1037 | APInt R(*this); |
1038 | R.lshrInPlace(ShiftAmt); |
1039 | return R; |
1040 | } |
1041 | |
1042 | /// Logical right-shift this APInt by ShiftAmt in place. |
1043 | void lshrInPlace(const APInt &ShiftAmt); |
1044 | |
1045 | /// Left-shift function. |
1046 | /// |
1047 | /// Left-shift this APInt by shiftAmt. |
1048 | APInt shl(const APInt &ShiftAmt) const { |
1049 | APInt R(*this); |
1050 | R <<= ShiftAmt; |
1051 | return R; |
1052 | } |
1053 | |
1054 | /// Rotate left by rotateAmt. |
1055 | APInt rotl(const APInt &rotateAmt) const; |
1056 | |
1057 | /// Rotate right by rotateAmt. |
1058 | APInt rotr(const APInt &rotateAmt) const; |
1059 | |
1060 | /// Unsigned division operation. |
1061 | /// |
1062 | /// Perform an unsigned divide operation on this APInt by RHS. Both this and |
1063 | /// RHS are treated as unsigned quantities for purposes of this division. |
1064 | /// |
1065 | /// \returns a new APInt value containing the division result, rounded towards |
1066 | /// zero. |
1067 | APInt udiv(const APInt &RHS) const; |
1068 | APInt udiv(uint64_t RHS) const; |
1069 | |
1070 | /// Signed division function for APInt. |
1071 | /// |
1072 | /// Signed divide this APInt by APInt RHS. |
1073 | /// |
1074 | /// The result is rounded towards zero. |
1075 | APInt sdiv(const APInt &RHS) const; |
1076 | APInt sdiv(int64_t RHS) const; |
1077 | |
1078 | /// Unsigned remainder operation. |
1079 | /// |
1080 | /// Perform an unsigned remainder operation on this APInt with RHS being the |
1081 | /// divisor. Both this and RHS are treated as unsigned quantities for purposes |
1082 | /// of this operation. Note that this is a true remainder operation and not a |
1083 | /// modulo operation because the sign follows the sign of the dividend which |
1084 | /// is *this. |
1085 | /// |
1086 | /// \returns a new APInt value containing the remainder result |
1087 | APInt urem(const APInt &RHS) const; |
1088 | uint64_t urem(uint64_t RHS) const; |
1089 | |
1090 | /// Function for signed remainder operation. |
1091 | /// |
1092 | /// Signed remainder operation on APInt. |
1093 | APInt srem(const APInt &RHS) const; |
1094 | int64_t srem(int64_t RHS) const; |
1095 | |
1096 | /// Dual division/remainder interface. |
1097 | /// |
1098 | /// Sometimes it is convenient to divide two APInt values and obtain both the |
1099 | /// quotient and remainder. This function does both operations in the same |
1100 | /// computation making it a little more efficient. The pair of input arguments |
1101 | /// may overlap with the pair of output arguments. It is safe to call |
1102 | /// udivrem(X, Y, X, Y), for example. |
1103 | static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, |
1104 | APInt &Remainder); |
1105 | static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient, |
1106 | uint64_t &Remainder); |
1107 | |
1108 | static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, |
1109 | APInt &Remainder); |
1110 | static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient, |
1111 | int64_t &Remainder); |
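// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Computing quotient and remainder
// in a single call:
static void divRemExample() {
  llvm::APInt Q(32, 0), R(32, 0);
  llvm::APInt::udivrem(llvm::APInt(32, 17), llvm::APInt(32, 5), Q, R);
  assert(Q == 3 && R == 2);
}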
1112 | |
1113 | // Operations that return overflow indicators. |
1114 | APInt sadd_ov(const APInt &RHS, bool &Overflow) const; |
1115 | APInt uadd_ov(const APInt &RHS, bool &Overflow) const; |
1116 | APInt ssub_ov(const APInt &RHS, bool &Overflow) const; |
1117 | APInt usub_ov(const APInt &RHS, bool &Overflow) const; |
1118 | APInt sdiv_ov(const APInt &RHS, bool &Overflow) const; |
1119 | APInt smul_ov(const APInt &RHS, bool &Overflow) const; |
1120 | APInt umul_ov(const APInt &RHS, bool &Overflow) const; |
1121 | APInt sshl_ov(const APInt &Amt, bool &Overflow) const; |
1122 | APInt ushl_ov(const APInt &Amt, bool &Overflow) const; |
1123 | |
1124 | // Operations that saturate |
1125 | APInt sadd_sat(const APInt &RHS) const; |
1126 | APInt uadd_sat(const APInt &RHS) const; |
1127 | APInt ssub_sat(const APInt &RHS) const; |
1128 | APInt usub_sat(const APInt &RHS) const; |
1129 | APInt smul_sat(const APInt &RHS) const; |
1130 | APInt umul_sat(const APInt &RHS) const; |
1131 | APInt sshl_sat(const APInt &RHS) const; |
1132 | APInt ushl_sat(const APInt &RHS) const; |
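// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Overflow-reporting versus
// saturating addition at the signed maximum of an 8-bit value:
static void overflowSatExample() {
  bool Overflow = false;
  llvm::APInt Wrapped = llvm::APInt(8, 127).sadd_ov(llvm::APInt(8, 1), Overflow);
  assert(Overflow && Wrapped.isMinSignedValue());                // wrapped to -128
  assert(llvm::APInt(8, 127).sadd_sat(llvm::APInt(8, 1)) == 127); // clamped
}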
1133 | |
1134 | /// Array-indexing support. |
1135 | /// |
1136 | /// \returns the bit value at bitPosition |
1137 | bool operator[](unsigned bitPosition) const { |
1138 | assert(bitPosition < getBitWidth() && "Bit position out of bounds!"); |
1139 | return (maskBit(bitPosition) & getWord(bitPosition)) != 0; |
1140 | } |
1141 | |
1142 | /// @} |
1143 | /// \name Comparison Operators |
1144 | /// @{ |
1145 | |
1146 | /// Equality operator. |
1147 | /// |
1148 | /// Compares this APInt with RHS for the validity of the equality |
1149 | /// relationship. |
1150 | bool operator==(const APInt &RHS) const { |
1151 | assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths"); |
1152 | if (isSingleWord()) |
1153 | return U.VAL == RHS.U.VAL; |
1154 | return EqualSlowCase(RHS); |
1155 | } |
1156 | |
1157 | /// Equality operator. |
1158 | /// |
1159 | /// Compares this APInt with a uint64_t for the validity of the equality |
1160 | /// relationship. |
1161 | /// |
1162 | /// \returns true if *this == Val |
1163 | bool operator==(uint64_t Val) const { |
1164 | return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val; |
1165 | } |
1166 | |
1167 | /// Equality comparison. |
1168 | /// |
1169 | /// Compares this APInt with RHS for the validity of the equality |
1170 | /// relationship. |
1171 | /// |
1172 | /// \returns true if *this == Val |
1173 | bool eq(const APInt &RHS) const { return (*this) == RHS; } |
1174 | |
1175 | /// Inequality operator. |
1176 | /// |
1177 | /// Compares this APInt with RHS for the validity of the inequality |
1178 | /// relationship. |
1179 | /// |
1180 | /// \returns true if *this != Val |
1181 | bool operator!=(const APInt &RHS) const { return !((*this) == RHS); } |
1182 | |
1183 | /// Inequality operator. |
1184 | /// |
1185 | /// Compares this APInt with a uint64_t for the validity of the inequality |
1186 | /// relationship. |
1187 | /// |
1188 | /// \returns true if *this != Val |
1189 | bool operator!=(uint64_t Val) const { return !((*this) == Val); } |
1190 | |
1191 | /// Inequality comparison |
1192 | /// |
1193 | /// Compares this APInt with RHS for the validity of the inequality |
1194 | /// relationship. |
1195 | /// |
1196 | /// \returns true if *this != Val |
1197 | bool ne(const APInt &RHS) const { return !((*this) == RHS); } |
1198 | |
1199 | /// Unsigned less than comparison |
1200 | /// |
1201 | /// Regards both *this and RHS as unsigned quantities and compares them for |
1202 | /// the validity of the less-than relationship. |
1203 | /// |
1204 | /// \returns true if *this < RHS when both are considered unsigned. |
1205 | bool ult(const APInt &RHS) const { return compare(RHS) < 0; } |
1206 | |
1207 | /// Unsigned less than comparison |
1208 | /// |
1209 | /// Regards *this as an unsigned quantity and compares it with RHS for |
1210 | /// the validity of the less-than relationship. |
1211 | /// |
1212 | /// \returns true if *this < RHS when considered unsigned. |
1213 | bool ult(uint64_t RHS) const { |
1214 | // Only need to check active bits if not a single word. |
1215 | return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS; |
1216 | } |
1217 | |
1218 | /// Signed less than comparison |
1219 | /// |
1220 | /// Regards both *this and RHS as signed quantities and compares them for |
1221 | /// validity of the less-than relationship. |
1222 | /// |
1223 | /// \returns true if *this < RHS when both are considered signed. |
1224 | bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; } |
1225 | |
1226 | /// Signed less than comparison |
1227 | /// |
1228 | /// Regards *this as a signed quantity and compares it with RHS for |
1229 | /// the validity of the less-than relationship. |
1230 | /// |
1231 | /// \returns true if *this < RHS when considered signed. |
1232 | bool slt(int64_t RHS) const { |
1233 | return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative() |
1234 | : getSExtValue() < RHS; |
1235 | } |
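// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] The same bit pattern compares
// differently under the unsigned and signed predicates:
static void signednessCompareExample() {
  llvm::APInt A(8, 0xFF); // 255 unsigned, -1 signed
  assert(A.ugt(llvm::APInt(8, 1)));
  assert(A.slt(llvm::APInt(8, 1)));
}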
1236 | |
1237 | /// Unsigned less or equal comparison |
1238 | /// |
1239 | /// Regards both *this and RHS as unsigned quantities and compares them for |
1240 | /// validity of the less-or-equal relationship. |
1241 | /// |
1242 | /// \returns true if *this <= RHS when both are considered unsigned. |
1243 | bool ule(const APInt &RHS) const { return compare(RHS) <= 0; } |
1244 | |
1245 | /// Unsigned less or equal comparison |
1246 | /// |
1247 | /// Regards *this as an unsigned quantity and compares it with RHS for |
1248 | /// the validity of the less-or-equal relationship. |
1249 | /// |
1250 | /// \returns true if *this <= RHS when considered unsigned. |
1251 | bool ule(uint64_t RHS) const { return !ugt(RHS); } |
1252 | |
1253 | /// Signed less or equal comparison |
1254 | /// |
1255 | /// Regards both *this and RHS as signed quantities and compares them for |
1256 | /// validity of the less-or-equal relationship. |
1257 | /// |
1258 | /// \returns true if *this <= RHS when both are considered signed. |
1259 | bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; } |
1260 | |
1261 | /// Signed less or equal comparison |
1262 | /// |
1263 | /// Regards *this as a signed quantity and compares it with RHS for the |
1264 | /// validity of the less-or-equal relationship. |
1265 | /// |
1266 | /// \returns true if *this <= RHS when considered signed. |
1267 | bool sle(uint64_t RHS) const { return !sgt(RHS); } |
1268 | |
1269 | /// Unsigned greater than comparison |
1270 | /// |
1271 | /// Regards both *this and RHS as unsigned quantities and compares them for |
1272 | /// the validity of the greater-than relationship. |
1273 | /// |
1274 | /// \returns true if *this > RHS when both are considered unsigned. |
1275 | bool ugt(const APInt &RHS) const { return !ule(RHS); } |
1276 | |
1277 | /// Unsigned greater than comparison |
1278 | /// |
1279 | /// Regards *this as an unsigned quantity and compares it with RHS for |
1280 | /// the validity of the greater-than relationship. |
1281 | /// |
1282 | /// \returns true if *this > RHS when considered unsigned. |
1283 | bool ugt(uint64_t RHS) const { |
1284 | // Only need to check active bits if not a single word. |
1285 | return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS; |
1286 | } |
1287 | |
1288 | /// Signed greater than comparison |
1289 | /// |
1290 | /// Regards both *this and RHS as signed quantities and compares them for the |
1291 | /// validity of the greater-than relationship. |
1292 | /// |
1293 | /// \returns true if *this > RHS when both are considered signed. |
1294 | bool sgt(const APInt &RHS) const { return !sle(RHS); } |
1295 | |
1296 | /// Signed greater than comparison |
1297 | /// |
1298 | /// Regards *this as a signed quantity and compares it with RHS for |
1299 | /// the validity of the greater-than relationship. |
1300 | /// |
1301 | /// \returns true if *this > RHS when considered signed. |
1302 | bool sgt(int64_t RHS) const { |
1303 | return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative() |
1304 | : getSExtValue() > RHS; |
1305 | } |
1306 | |
1307 | /// Unsigned greater or equal comparison |
1308 | /// |
1309 | /// Regards both *this and RHS as unsigned quantities and compares them for |
1310 | /// validity of the greater-or-equal relationship. |
1311 | /// |
1312 | /// \returns true if *this >= RHS when both are considered unsigned. |
1313 | bool uge(const APInt &RHS) const { return !ult(RHS); } |
1314 | |
1315 | /// Unsigned greater or equal comparison |
1316 | /// |
1317 | /// Regards *this as an unsigned quantity and compares it with RHS for |
1318 | /// the validity of the greater-or-equal relationship. |
1319 | /// |
1320 | /// \returns true if *this >= RHS when considered unsigned. |
1321 | bool uge(uint64_t RHS) const { return !ult(RHS); } |
1322 | |
1323 | /// Signed greater or equal comparison |
1324 | /// |
1325 | /// Regards both *this and RHS as signed quantities and compares them for |
1326 | /// validity of the greater-or-equal relationship. |
1327 | /// |
1328 | /// \returns true if *this >= RHS when both are considered signed. |
1329 | bool sge(const APInt &RHS) const { return !slt(RHS); } |
1330 | |
1331 | /// Signed greater or equal comparison |
1332 | /// |
1333 | /// Regards *this as a signed quantity and compares it with RHS for |
1334 | /// the validity of the greater-or-equal relationship. |
1335 | /// |
1336 | /// \returns true if *this >= RHS when considered signed. |
1337 | bool sge(int64_t RHS) const { return !slt(RHS); } |
1338 | |
1339 | /// This operation tests if there are any pairs of corresponding bits |
1340 | /// between this APInt and RHS that are both set. |
1341 | bool intersects(const APInt &RHS) const { |
1342 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
1343 | if (isSingleWord()) |
1344 | return (U.VAL & RHS.U.VAL) != 0; |
1345 | return intersectsSlowCase(RHS); |
1346 | } |
1347 | |
1348 | /// This operation checks that all bits set in this APInt are also set in RHS. |
1349 | bool isSubsetOf(const APInt &RHS) const { |
1350 | assert(BitWidth == RHS.BitWidth && "Bit widths must be the same"); |
1351 | if (isSingleWord()) |
1352 | return (U.VAL & ~RHS.U.VAL) == 0; |
1353 | return isSubsetOfSlowCase(RHS); |
1354 | } |
1355 | |
1356 | /// @} |
1357 | /// \name Resizing Operators |
1358 | /// @{ |
1359 | |
1360 | /// Truncate to new width. |
1361 | /// |
1362 | /// Truncate the APInt to a specified width. It is an error to specify a width |
1363 | /// that is greater than or equal to the current width. |
1364 | APInt trunc(unsigned width) const; |
1365 | |
1366 | /// Truncate to new width with unsigned saturation. |
1367 | /// |
1368 | /// If the APInt, treated as unsigned integer, can be losslessly truncated to |
1369 | /// the new bitwidth, then return truncated APInt. Else, return max value. |
1370 | APInt truncUSat(unsigned width) const; |
1371 | |
1372 | /// Truncate to new width with signed saturation. |
1373 | /// |
1374 | /// If this APInt, treated as signed integer, can be losslessly truncated to |
1375 | /// the new bitwidth, then return truncated APInt. Else, return either |
1376 | /// signed min value if the APInt was negative, or signed max value. |
1377 | APInt truncSSat(unsigned width) const; |
1378 | |
1379 | /// Sign extend to a new width. |
1380 | /// |
1381 | /// This operation sign extends the APInt to a new width. If the high order |
1382 | /// bit is set, the fill on the left will be done with 1 bits, otherwise zero. |
1383 | /// It is an error to specify a width that is less than or equal to the |
1384 | /// current width. |
1385 | APInt sext(unsigned width) const; |
1386 | |
1387 | /// Zero extend to a new width. |
1388 | /// |
1389 | /// This operation zero extends the APInt to a new width. The high order bits |
1390 | /// are filled with 0 bits. It is an error to specify a width that is less |
1391 | /// than or equal to the current width. |
1392 | APInt zext(unsigned width) const; |
1393 | |
1394 | /// Sign extend or truncate to width |
1395 | /// |
1396 | /// Make this APInt have the bit width given by \p width. The value is sign |
1397 | /// extended, truncated, or left alone to make it that width. |
1398 | APInt sextOrTrunc(unsigned width) const; |
1399 | |
1400 | /// Zero extend or truncate to width |
1401 | /// |
1402 | /// Make this APInt have the bit width given by \p width. The value is zero |
1403 | /// extended, truncated, or left alone to make it that width. |
1404 | APInt zextOrTrunc(unsigned width) const; |
1405 | |
1406 | /// Truncate to width |
1407 | /// |
1408 | /// Make this APInt have the bit width given by \p width. The value is |
1409 | /// truncated or left alone to make it that width. |
1410 | APInt truncOrSelf(unsigned width) const; |
1411 | |
1412 | /// Sign extend or truncate to width |
1413 | /// |
1414 | /// Make this APInt have the bit width given by \p width. The value is sign |
1415 | /// extended, or left alone to make it that width. |
1416 | APInt sextOrSelf(unsigned width) const; |
1417 | |
1418 | /// Zero extend or truncate to width |
1419 | /// |
1420 | /// Make this APInt have the bit width given by \p width. The value is zero |
1421 | /// extended, or left alone to make it that width. |
1422 | APInt zextOrSelf(unsigned width) const; |
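// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Resizing a 16-bit value whose sign
// bit is set:
static void resizeExample() {
  llvm::APInt V(16, 0xFF80);
  assert(V.zext(32) == 0x0000FF80); // high bits zero-filled
  assert(V.sext(32) == 0xFFFFFF80); // high bits copy the sign bit
  assert(V.trunc(8) == 0x80);       // high bits dropped
}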
1423 | |
1424 | /// @} |
1425 | /// \name Bit Manipulation Operators |
1426 | /// @{ |
1427 | |
1428 | /// Set every bit to 1. |
1429 | void setAllBits() { |
1430 | if (isSingleWord()) |
1431 | U.VAL = WORDTYPE_MAX; |
1432 | else |
1433 | // Set all the bits in all the words. |
1434 | memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE); |
1435 | // Clear the unused ones |
1436 | clearUnusedBits(); |
1437 | } |
1438 | |
1439 | /// Set a given bit to 1. |
1440 | /// |
1441 | /// Set the bit at position \p BitPosition to 1. |
1442 | void setBit(unsigned BitPosition) { |
1443 | assert(BitPosition < BitWidth && "BitPosition out of range"); |
1444 | WordType Mask = maskBit(BitPosition); |
1445 | if (isSingleWord()) |
1446 | U.VAL |= Mask; |
1447 | else |
1448 | U.pVal[whichWord(BitPosition)] |= Mask; |
1449 | } |
1450 | |
1451 | /// Set the sign bit to 1. |
1452 | void setSignBit() { |
1453 | setBit(BitWidth - 1); |
1454 | } |
1455 | |
1456 | /// Set a given bit to a given value. |
1457 | void setBitVal(unsigned BitPosition, bool BitValue) { |
1458 | if (BitValue) |
1459 | setBit(BitPosition); |
1460 | else |
1461 | clearBit(BitPosition); |
1462 | } |
1463 | |
1464 | /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1. |
1465 | /// This function handles the "wrap" case when \p loBit >= \p hiBit, and calls |
1466 | /// setBits when \p loBit < \p hiBit. |
1467 | /// For \p loBit == \p hiBit wrap case, set every bit to 1. |
1468 | void setBitsWithWrap(unsigned loBit, unsigned hiBit) { |
1469 | assert(hiBit <= BitWidth && "hiBit out of range"); |
1470 | assert(loBit <= BitWidth && "loBit out of range"); |
1471 | if (loBit < hiBit) { |
1472 | setBits(loBit, hiBit); |
1473 | return; |
1474 | } |
1475 | setLowBits(hiBit); |
1476 | setHighBits(BitWidth - loBit); |
1477 | } |
1478 | |
1479 | /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1. |
1480 | /// This function handles the case when \p loBit <= \p hiBit. |
1481 | void setBits(unsigned loBit, unsigned hiBit) { |
1482 | assert(hiBit <= BitWidth && "hiBit out of range"); |
1483 | assert(loBit <= BitWidth && "loBit out of range"); |
1484 | assert(loBit <= hiBit && "loBit greater than hiBit"); |
1485 | if (loBit == hiBit) |
1486 | return; |
1487 | if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) { |
1488 | uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit)); |
1489 | mask <<= loBit; |
1490 | if (isSingleWord()) |
1491 | U.VAL |= mask; |
1492 | else |
1493 | U.pVal[0] |= mask; |
1494 | } else { |
1495 | setBitsSlowCase(loBit, hiBit); |
1496 | } |
1497 | } |
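// [Editorial usage sketch, not part of the original header; assumes
// "llvm/ADT/APInt.h" and <cassert>.] Setting contiguous bit ranges:
static void setBitsExample() {
  llvm::APInt M(32, 0);
  M.setBits(8, 16);  // bits [8, 16)
  assert(M == 0x0000FF00);
  M.setBitsFrom(24); // bits [24, 32)
  assert(M == 0xFF00FF00);
}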
1498 | |
1499 | /// Set the top bits starting from loBit. |
1500 | void setBitsFrom(unsigned loBit) { |
1501 | return setBits(loBit, BitWidth); |
1502 | } |
1503 | |
1504 | /// Set the bottom loBits bits. |
1505 | void setLowBits(unsigned loBits) { |
1506 | return setBits(0, loBits); |