Bug Summary

File: llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
Warning: line 878, column 7
Called C++ object pointer is null
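
The path below reaches line 878 with ResultPtr assumed to be null: the value returned by Builder.CreateGEP (event 36) is the pointer later dereferenced by ResultPtr->getType() (event 41). Here is a minimal, self-contained C++ sketch of that shape, using hypothetical stand-in types (Value, Type, and createGEP below are placeholders, not the LLVM API); an explicit null check on the result is what keeps the equivalent path safe:

#include <cstdio>

struct Type { int Id; };

struct Value {
  Type *Ty;
  Type *getType() const { return Ty; }   // calling this through a null Value* is the reported defect
};

// Hypothetical stand-in for the builder call; the analyzer's model allows a null result.
Value *createGEP(Value *Base, long ByteOffset) {
  return ByteOffset != 0 ? Base : nullptr;
}

int main() {
  Type T{1};
  Value Base{&T};
  Value *ResultPtr = &Base;
  ResultPtr = createGEP(ResultPtr, 0);                       // may be null on this path
  if (ResultPtr && ResultPtr->getType() != Base.getType())   // explicit null check keeps the path safe
    std::puts("types differ");
  return 0;
}

Whether IRBuilder::CreateGEP can in fact return null at the flagged call site is a separate question; the sketch only shows why the checker reports the dereference.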

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SeparateConstOffsetFromGEP.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-01-13-084841-49055-1 -x c++ /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp

1//===- SeparateConstOffsetFromGEP.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Loop unrolling may create many similar GEPs for array accesses.
10// e.g., a 2-level loop
11//
12// float a[32][32]; // global variable
13//
14// for (int i = 0; i < 2; ++i) {
15// for (int j = 0; j < 2; ++j) {
16// ...
17// ... = a[x + i][y + j];
18// ...
19// }
20// }
21//
22// will probably be unrolled to:
23//
24// gep %a, 0, %x, %y; load
25// gep %a, 0, %x, %y + 1; load
26// gep %a, 0, %x + 1, %y; load
27// gep %a, 0, %x + 1, %y + 1; load
28//
29// LLVM's GVN does not use partial redundancy elimination yet, and is thus
30// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
31// significant slowdown in targets with limited addressing modes. For instance,
32// because the PTX target does not support the reg+reg addressing mode, the
33// NVPTX backend emits PTX code that literally computes the pointer address of
34// each GEP, wasting tons of registers. It emits the following PTX for the
35// first load and similar PTX for other loads.
36//
37// mov.u32 %r1, %x;
38// mov.u32 %r2, %y;
39// mul.wide.u32 %rl2, %r1, 128;
40// mov.u64 %rl3, a;
41// add.s64 %rl4, %rl3, %rl2;
42// mul.wide.u32 %rl5, %r2, 4;
43// add.s64 %rl6, %rl4, %rl5;
44// ld.global.f32 %f1, [%rl6];
45//
46// To reduce the register pressure, the optimization implemented in this file
47// merges the common part of a group of GEPs, so we can compute each pointer
48// address by adding a simple offset to the common part, saving many registers.
49//
50// It works by splitting each GEP into a variadic base and a constant offset.
51// The variadic base can be computed once and reused by multiple GEPs, and the
52// constant offsets can be nicely folded into the reg+immediate addressing mode
53// (supported by most targets) without using any extra register.
54//
55// For instance, we transform the four GEPs and four loads in the above example
56// into:
57//
58// base = gep a, 0, x, y
59// load base
60// load base + 1 * sizeof(float)
61// load base + 32 * sizeof(float)
62// load base + 33 * sizeof(float)
63//
64// Given the transformed IR, a backend that supports the reg+immediate
65// addressing mode can easily fold the pointer arithmetics into the loads. For
66// example, the NVPTX backend can easily fold the pointer arithmetics into the
67// ld.global.f32 instructions, and the resultant PTX uses much fewer registers.
68//
69// mov.u32 %r1, %tid.x;
70// mov.u32 %r2, %tid.y;
71// mul.wide.u32 %rl2, %r1, 128;
72// mov.u64 %rl3, a;
73// add.s64 %rl4, %rl3, %rl2;
74// mul.wide.u32 %rl5, %r2, 4;
75// add.s64 %rl6, %rl4, %rl5;
76// ld.global.f32 %f1, [%rl6]; // so far the same as unoptimized PTX
77// ld.global.f32 %f2, [%rl6+4]; // much better
78// ld.global.f32 %f3, [%rl6+128]; // much better
79// ld.global.f32 %f4, [%rl6+132]; // much better
80//
81// Another improvement enabled by the LowerGEP flag is to lower a GEP with
82// multiple indices to either multiple GEPs with a single index or arithmetic
83// operations (depending on whether the target uses alias analysis in codegen).
84// Such transformation can have following benefits:
85// (1) It can always extract constants in the indices of structure type.
86// (2) After such Lowering, there are more optimization opportunities such as
87// CSE, LICM and CGP.
88//
89// E.g. The following GEPs have multiple indices:
90// BB1:
91// %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
92// load %p
93// ...
94// BB2:
95// %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 2
96// load %p2
97// ...
98//
99// We can not do CSE to the common part related to index "i64 %i". Lowering
100// GEPs can achieve such goals.
101// If the target does not use alias analysis in codegen, this pass will
102// lower a GEP with multiple indices into arithmetic operations:
103// BB1:
104// %1 = ptrtoint [10 x %struct]* %ptr to i64 ; CSE opportunity
105// %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity
106// %3 = add i64 %1, %2 ; CSE opportunity
107// %4 = mul i64 %j1, length_of_struct
108// %5 = add i64 %3, %4
109// %6 = add i64 %3, struct_field_3 ; Constant offset
110// %p = inttoptr i64 %6 to i32*
111// load %p
112// ...
113// BB2:
114// %7 = ptrtoint [10 x %struct]* %ptr to i64 ; CSE opportunity
115// %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity
116// %9 = add i64 %7, %8 ; CSE opportunity
117// %10 = mul i64 %j2, length_of_struct
118// %11 = add i64 %9, %10
119// %12 = add i64 %11, struct_field_2 ; Constant offset
120// %p = inttoptr i64 %12 to i32*
121// load %p2
122// ...
123//
124// If the target uses alias analysis in codegen, this pass will lower a GEP
125// with multiple indices into multiple GEPs with a single index:
126// BB1:
127// %1 = bitcast [10 x %struct]* %ptr to i8* ; CSE opportunity
128// %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity
129// %3 = getelementptr i8* %1, i64 %2 ; CSE opportunity
130// %4 = mul i64 %j1, length_of_struct
131// %5 = getelementptr i8* %3, i64 %4
132// %6 = getelementptr i8* %5, struct_field_3 ; Constant offset
133// %p = bitcast i8* %6 to i32*
134// load %p
135// ...
136// BB2:
137// %7 = bitcast [10 x %struct]* %ptr to i8* ; CSE opportunity
138// %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity
139// %9 = getelementptr i8* %7, i64 %8 ; CSE opportunity
140// %10 = mul i64 %j2, length_of_struct
141// %11 = getelementptr i8* %9, i64 %10
142// %12 = getelementptr i8* %11, struct_field_2 ; Constant offset
143// %p2 = bitcast i8* %12 to i32*
144// load %p2
145// ...
146//
147// Lowering GEPs can also benefit other passes such as LICM and CGP.
148// LICM (Loop Invariant Code Motion) can not hoist/sink a GEP of multiple
149// indices if one of the indices is variant. If we lower such a GEP into invariant
150// parts and variant parts, LICM can hoist/sink those invariant parts.
151// CGP (CodeGen Prepare) tries to sink address calculations that match the
152// target's addressing modes. A GEP with multiple indices may not match and will
153// not be sunk. If we lower such GEP into smaller parts, CGP may sink some of
154// them. So we end up with a better addressing mode.
155//
156//===----------------------------------------------------------------------===//
157
158#include "llvm/ADT/APInt.h"
159#include "llvm/ADT/DenseMap.h"
160#include "llvm/ADT/DepthFirstIterator.h"
161#include "llvm/ADT/SmallVector.h"
162#include "llvm/Analysis/LoopInfo.h"
163#include "llvm/Analysis/MemoryBuiltins.h"
164#include "llvm/Analysis/ScalarEvolution.h"
165#include "llvm/Analysis/TargetLibraryInfo.h"
166#include "llvm/Analysis/TargetTransformInfo.h"
167#include "llvm/Analysis/ValueTracking.h"
168#include "llvm/IR/BasicBlock.h"
169#include "llvm/IR/Constant.h"
170#include "llvm/IR/Constants.h"
171#include "llvm/IR/DataLayout.h"
172#include "llvm/IR/DerivedTypes.h"
173#include "llvm/IR/Dominators.h"
174#include "llvm/IR/Function.h"
175#include "llvm/IR/GetElementPtrTypeIterator.h"
176#include "llvm/IR/IRBuilder.h"
177#include "llvm/IR/Instruction.h"
178#include "llvm/IR/Instructions.h"
179#include "llvm/IR/Module.h"
180#include "llvm/IR/PatternMatch.h"
181#include "llvm/IR/Type.h"
182#include "llvm/IR/User.h"
183#include "llvm/IR/Value.h"
184#include "llvm/InitializePasses.h"
185#include "llvm/Pass.h"
186#include "llvm/Support/Casting.h"
187#include "llvm/Support/CommandLine.h"
188#include "llvm/Support/ErrorHandling.h"
189#include "llvm/Support/raw_ostream.h"
190#include "llvm/Target/TargetMachine.h"
191#include "llvm/Transforms/Scalar.h"
192#include "llvm/Transforms/Utils/Local.h"
193#include <cassert>
194#include <cstdint>
195#include <string>
196
197using namespace llvm;
198using namespace llvm::PatternMatch;
199
200static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
201 "disable-separate-const-offset-from-gep", cl::init(false),
202 cl::desc("Do not separate the constant offset from a GEP instruction"),
203 cl::Hidden);
204
205// Setting this flag may emit false positives when the input module already
206// contains dead instructions. Therefore, we set it only in unit tests that are
207// free of dead code.
208static cl::opt<bool>
209 VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
210 cl::desc("Verify this pass produces no dead code"),
211 cl::Hidden);
212
213namespace {
214
215/// A helper class for separating a constant offset from a GEP index.
216///
217/// In real programs, a GEP index may be more complicated than a simple addition
218/// of something and a constant integer which can be trivially split. For
219/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
220/// constant offset, so that we can separate the index to (a << 3) + b and 5.
221///
222/// Therefore, this class looks into the expression that computes a given GEP
223/// index, and tries to find a constant integer that can be hoisted to the
224/// outermost level of the expression as an addition. Not every constant in an
225/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a +
226/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), however in this case,
227/// -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15).
228class ConstantOffsetExtractor {
229public:
230 /// Extracts a constant offset from the given GEP index. It returns the
231 /// new index representing the remainder (equal to the original index minus
232 /// the constant offset), or nullptr if we cannot extract a constant offset.
233 /// \p Idx The given GEP index
234 /// \p GEP The given GEP
235 /// \p UserChainTail Outputs the tail of UserChain so that we can
236 /// garbage-collect unused instructions in UserChain.
237 static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
238 User *&UserChainTail, const DominatorTree *DT);
239
240 /// Looks for a constant offset from the given GEP index without extracting
241 /// it. It returns the numeric value of the extracted constant offset (0 if
242 /// failed). The meaning of the arguments are the same as Extract.
243 static int64_t Find(Value *Idx, GetElementPtrInst *GEP,
244 const DominatorTree *DT);
245
246private:
247 ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
248 : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
249 }
250
251 /// Searches the expression that computes V for a non-zero constant C s.t.
252 /// V can be reassociated into the form V' + C. If the searching is
253 /// successful, returns C and update UserChain as a def-use chain from C to V;
254 /// otherwise, UserChain is empty.
255 ///
256 /// \p V The given expression
257 /// \p SignExtended Whether V will be sign-extended in the computation of the
258 /// GEP index
259 /// \p ZeroExtended Whether V will be zero-extended in the computation of the
260 /// GEP index
261 /// \p NonNegative Whether V is guaranteed to be non-negative. For example,
262 /// an index of an inbounds GEP is guaranteed to be
263/// non-negative. Leveraging this, we can better split
264 /// inbounds GEPs.
265 APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
266
267 /// A helper function to look into both operands of a binary operator.
268 APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
269 bool ZeroExtended);
270
271 /// After finding the constant offset C from the GEP index I, we build a new
272 /// index I' s.t. I' + C = I. This function builds and returns the new
273 /// index I' according to UserChain produced by function "find".
274 ///
275 /// The building conceptually takes two steps:
276 /// 1) iteratively distribute s/zext towards the leaves of the expression tree
277 /// that computes I
278 /// 2) reassociate the expression tree to the form I' + C.
279 ///
280 /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
281 /// sext to a, b and 5 so that we have
282 /// sext(a) + (sext(b) + 5).
283 /// Then, we reassociate it to
284 /// (sext(a) + sext(b)) + 5.
285 /// Given this form, we know I' is sext(a) + sext(b).
286 Value *rebuildWithoutConstOffset();
287
288 /// After the first step of rebuilding the GEP index without the constant
289 /// offset, distribute s/zext to the operands of all operators in UserChain.
290 /// e.g., zext(sext(a + (b + 5)) (assuming no overflow) =>
291 /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
292 ///
293 /// The function also updates UserChain to point to new subexpressions after
294 /// distributing s/zext. e.g., the old UserChain of the above example is
295 /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
296 /// and the new UserChain is
297 /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
298 /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))
299 ///
300 /// \p ChainIndex The index to UserChain. ChainIndex is initially
301 /// UserChain.size() - 1, and is decremented during
302 /// the recursion.
303 Value *distributeExtsAndCloneChain(unsigned ChainIndex);
304
305 /// Reassociates the GEP index to the form I' + C and returns I'.
306 Value *removeConstOffset(unsigned ChainIndex);
307
308 /// A helper function to apply ExtInsts, a list of s/zext, to value V.
309 /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
310 /// returns "sext i32 (zext i16 V to i32) to i64".
311 Value *applyExts(Value *V);
312
313 /// A helper function that returns whether we can trace into the operands
314 /// of binary operator BO for a constant offset.
315 ///
316 /// \p SignExtended Whether BO is surrounded by sext
317 /// \p ZeroExtended Whether BO is surrounded by zext
318 /// \p NonNegative Whether BO is known to be non-negative, e.g., an in-bound
319 /// array index.
320 bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
321 bool NonNegative);
322
323 /// The path from the constant offset to the old GEP index. e.g., if the GEP
324 /// index is "a * b + (c + 5)". After running function find, UserChain[0] will
325 /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
326 /// UserChain[2] will be the entire expression "a * b + (c + 5)".
327 ///
328 /// This path helps to rebuild the new GEP index.
329 SmallVector<User *, 8> UserChain;
330
331 /// A data structure used in rebuildWithoutConstOffset. Contains all
332 /// sext/zext instructions along UserChain.
333 SmallVector<CastInst *, 16> ExtInsts;
334
335 /// Insertion position of cloned instructions.
336 Instruction *IP;
337
338 const DataLayout &DL;
339 const DominatorTree *DT;
340};
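
To make the class comment above concrete, here is a toy C++ sketch of the basic idea: walk the expression that computes an index and pull a constant term out, leaving the variadic remainder. The Expr type is hypothetical, and the sketch only sums the constant leaves of a pure-add tree; the real extractor also traces through sub, or, and sext/zext, stops at the first constant it finds, and rebuilds actual IR.

#include <cstdio>

struct Expr {
  bool IsConst;
  long Val;                              // payload when IsConst
  Expr *LHS = nullptr, *RHS = nullptr;   // children of a '+' node otherwise
};

// Returns the constant offset C such that the tree equals (variadic part) + C.
long findConstOffset(const Expr *E) {
  if (!E) return 0;
  if (E->IsConst) return E->Val;
  return findConstOffset(E->LHS) + findConstOffset(E->RHS);
}

int main() {
  Expr C5{true, 5}, A{false, 0}, B{false, 0};
  Expr BPlus5{false, 0, &B, &C5};        // b + 5
  Expr Sum{false, 0, &A, &BPlus5};       // a + (b + 5)
  std::printf("constant offset: %ld\n", findConstOffset(&Sum));  // prints 5
  return 0;
}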
341
342/// A pass that tries to split every GEP in the function into a variadic
343/// base and a constant offset. It is a FunctionPass because searching for the
344/// constant offset may inspect other basic blocks.
345class SeparateConstOffsetFromGEP : public FunctionPass {
346public:
347 static char ID;
348
349 SeparateConstOffsetFromGEP(bool LowerGEP = false)
350 : FunctionPass(ID), LowerGEP(LowerGEP) {
351 initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
352 }
353
354 void getAnalysisUsage(AnalysisUsage &AU) const override {
355 AU.addRequired<DominatorTreeWrapperPass>();
356 AU.addRequired<ScalarEvolutionWrapperPass>();
357 AU.addRequired<TargetTransformInfoWrapperPass>();
358 AU.addRequired<LoopInfoWrapperPass>();
359 AU.setPreservesCFG();
360 AU.addRequired<TargetLibraryInfoWrapperPass>();
361 }
362
363 bool doInitialization(Module &M) override {
364 DL = &M.getDataLayout();
365 return false;
366 }
367
368 bool runOnFunction(Function &F) override;
369
370private:
371 /// Tries to split the given GEP into a variadic base and a constant offset,
372 /// and returns true if the splitting succeeds.
373 bool splitGEP(GetElementPtrInst *GEP);
374
375 /// Lower a GEP with multiple indices into multiple GEPs with a single index.
376 /// Function splitGEP already split the original GEP into a variadic part and
377 /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
378 /// variadic part into a set of GEPs with a single index and applies
379 /// AccumulativeByteOffset to it.
380 /// \p Variadic The variadic part of the original GEP.
381 /// \p AccumulativeByteOffset The constant offset.
382 void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
383 int64_t AccumulativeByteOffset);
384
385 /// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form.
386 /// Function splitGEP already split the original GEP into a variadic part and
387 /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
388 /// variadic part into a set of arithmetic operations and applies
389 /// AccumulativeByteOffset to it.
390 /// \p Variadic The variadic part of the original GEP.
391 /// \p AccumulativeByteOffset The constant offset.
392 void lowerToArithmetics(GetElementPtrInst *Variadic,
393 int64_t AccumulativeByteOffset);
394
395 /// Finds the constant offset within each index and accumulates them. If
396 /// LowerGEP is true, it finds in indices of both sequential and structure
397 /// types, otherwise it only finds in sequential indices. The output
398 /// NeedsExtraction indicates whether we successfully find a non-zero constant
399 /// offset.
400 int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
401
402 /// Canonicalize array indices to pointer-size integers. This helps to
403 /// simplify the logic of splitting a GEP. For example, if a + b is a
404 /// pointer-size integer, we have
405 /// gep base, a + b = gep (gep base, a), b
406 /// However, this equality may not hold if the size of a + b is smaller than
407 /// the pointer size, because LLVM conceptually sign-extends GEP indices to
408 /// pointer size before computing the address
409 /// (http://llvm.org/docs/LangRef.html#id181).
410 ///
411 /// This canonicalization is very likely already done in clang and
412 /// instcombine. Therefore, the program will probably remain the same.
413 ///
414 /// Returns true if the module changes.
415 ///
416 /// Verified in @i32_add in split-gep.ll
417 bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
418
419 /// Optimize sext(a)+sext(b) to sext(a+b) when a+b can't sign overflow.
420 /// SeparateConstOffsetFromGEP distributes a sext to leaves before extracting
421 /// the constant offset. After extraction, it becomes desirable to reunion the
422 /// distributed sexts. For example,
423 ///
424 /// &a[sext(i +nsw (j +nsw 5)]
425 /// => distribute &a[sext(i) +nsw (sext(j) +nsw 5)]
426 /// => constant extraction &a[sext(i) + sext(j)] + 5
427 /// => reunion &a[sext(i +nsw j)] + 5
428 bool reuniteExts(Function &F);
429
430 /// A helper that reunites sexts in an instruction.
431 bool reuniteExts(Instruction *I);
432
433 /// Find the closest dominator of <Dominatee> that is equivalent to <Key>.
434 Instruction *findClosestMatchingDominator(const SCEV *Key,
435 Instruction *Dominatee);
436 /// Verify F is free of dead code.
437 void verifyNoDeadCode(Function &F);
438
439 bool hasMoreThanOneUseInLoop(Value *v, Loop *L);
440
441 // Swap the index operand of two GEP.
442 void swapGEPOperand(GetElementPtrInst *First, GetElementPtrInst *Second);
443
444 // Check if it is safe to swap operand of two GEP.
445 bool isLegalToSwapOperand(GetElementPtrInst *First, GetElementPtrInst *Second,
446 Loop *CurLoop);
447
448 const DataLayout *DL = nullptr;
449 DominatorTree *DT = nullptr;
450 ScalarEvolution *SE;
451
452 LoopInfo *LI;
453 TargetLibraryInfo *TLI;
454
455 /// Whether to lower a GEP with multiple indices into arithmetic operations or
456 /// multiple GEPs with a single index.
457 bool LowerGEP;
458
459 DenseMap<const SCEV *, SmallVector<Instruction *, 2>> DominatingExprs;
460};
461
462} // end anonymous namespace
463
464char SeparateConstOffsetFromGEP::ID = 0;
465
466INITIALIZE_PASS_BEGIN(
467 SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
468 "Split GEPs to a variadic base and a constant offset for better CSE", false,
469 false)
470INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
471INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
472INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
473INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
474INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
475INITIALIZE_PASS_END(
476 SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
477 "Split GEPs to a variadic base and a constant offset for better CSE", false,
478 false)
479
480FunctionPass *llvm::createSeparateConstOffsetFromGEPPass(bool LowerGEP) {
481 return new SeparateConstOffsetFromGEP(LowerGEP);
482}
483
484bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
485 bool ZeroExtended,
486 BinaryOperator *BO,
487 bool NonNegative) {
488 // We only consider ADD, SUB and OR, because a non-zero constant found in
489 // expressions composed of these operations can be easily hoisted as a
490 // constant offset by reassociation.
491 if (BO->getOpcode() != Instruction::Add &&
492 BO->getOpcode() != Instruction::Sub &&
493 BO->getOpcode() != Instruction::Or) {
494 return false;
495 }
496
497 Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
498 // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
499 // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
500 // FIXME: this does not appear to be covered by any tests
501 // (with x86/aarch64 backends at least)
502 if (BO->getOpcode() == Instruction::Or &&
503 !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
504 return false;
505
506 // In addition, tracing into BO requires that its surrounding s/zext (if
507 // any) is distributable to both operands.
508 //
509 // Suppose BO = A op B.
510 // SignExtended | ZeroExtended | Distributable?
511 // --------------+--------------+----------------------------------
512 // 0 | 0 | true because no s/zext exists
513 // 0 | 1 | zext(BO) == zext(A) op zext(B)
514 // 1 | 0 | sext(BO) == sext(A) op sext(B)
515 // 1 | 1 | zext(sext(BO)) ==
516 // | | zext(sext(A)) op zext(sext(B))
517 if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
518 // If a + b >= 0 and (a >= 0 or b >= 0), then
519 // sext(a + b) = sext(a) + sext(b)
520 // even if the addition is not marked nsw.
521 //
522 // Leveraging this invariant, we can trace into an sext'ed inbound GEP
523 // index if the constant offset is non-negative.
524 //
525 // Verified in @sext_add in split-gep.ll.
526 if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
527 if (!ConstLHS->isNegative())
528 return true;
529 }
530 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
531 if (!ConstRHS->isNegative())
532 return true;
533 }
534 }
535
536 // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
537 // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
538 if (BO->getOpcode() == Instruction::Add ||
539 BO->getOpcode() == Instruction::Sub) {
540 if (SignExtended && !BO->hasNoSignedWrap())
541 return false;
542 if (ZeroExtended && !BO->hasNoUnsignedWrap())
543 return false;
544 }
545
546 return true;
547}
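
The non-negativity argument above can be checked numerically. The following standalone snippet (illustrative only; i8 and i16 model a narrow index and its sign extension) exhaustively verifies that when the wrapped i8 sum is non-negative and at least one operand is non-negative, sign-extending the sum equals adding the sign-extended operands:

#include <cstdint>
#include <cstdio>

int main() {
  long Violations = 0;
  for (int A = -128; A <= 127; ++A) {
    for (int B = -128; B <= 127; ++B) {
      int Wrapped = A + B;                       // emulate a wrapping i8 add
      if (Wrapped > 127) Wrapped -= 256;
      if (Wrapped < -128) Wrapped += 256;
      if (Wrapped >= 0 && (A >= 0 || B >= 0) &&
          static_cast<int16_t>(Wrapped) != static_cast<int16_t>(A + B))
        ++Violations;
    }
  }
  std::printf("violations: %ld\n", Violations);  // prints 0
  return 0;
}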
548
549APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
550 bool SignExtended,
551 bool ZeroExtended) {
552 // BO being non-negative does not shed light on whether its operands are
553 // non-negative. Clear the NonNegative flag here.
554 APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
555 /* NonNegative */ false);
556 // If we found a constant offset in the left operand, stop and return that.
557 // This shortcut might cause us to miss opportunities of combining the
558 // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
559 // However, such cases are probably already handled by -instcombine,
560 // given this pass runs after the standard optimizations.
561 if (ConstantOffset != 0) return ConstantOffset;
562 ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
563 /* NonNegative */ false);
564 // If U is a sub operator, negate the constant offset found in the right
565 // operand.
566 if (BO->getOpcode() == Instruction::Sub)
567 ConstantOffset = -ConstantOffset;
568 return ConstantOffset;
569}
570
571APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
572 bool ZeroExtended, bool NonNegative) {
573 // TODO(jingyue): We could trace into integer/pointer casts, such as
574 // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
575 // integers because it gives good enough results for our benchmarks.
576 unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
577
578 // We cannot do much with Values that are not a User, such as an Argument.
579 User *U = dyn_cast<User>(V);
580 if (U == nullptr) return APInt(BitWidth, 0);
581
582 APInt ConstantOffset(BitWidth, 0);
583 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
584 // Hooray, we found it!
585 ConstantOffset = CI->getValue();
586 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
587 // Trace into subexpressions for more hoisting opportunities.
588 if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
589 ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
590 } else if (isa<TruncInst>(V)) {
591 ConstantOffset =
592 find(U->getOperand(0), SignExtended, ZeroExtended, NonNegative)
593 .trunc(BitWidth);
594 } else if (isa<SExtInst>(V)) {
595 ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
596 ZeroExtended, NonNegative).sext(BitWidth);
597 } else if (isa<ZExtInst>(V)) {
598 // As an optimization, we can clear the SignExtended flag because
599 // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
600 //
601 // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
602 ConstantOffset =
603 find(U->getOperand(0), /* SignExtended */ false,
604 /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
605 }
606
607 // If we found a non-zero constant offset, add it to the path for
608 // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
609 // help this optimization.
610 if (ConstantOffset != 0)
611 UserChain.push_back(U);
612 return ConstantOffset;
613}
614
615Value *ConstantOffsetExtractor::applyExts(Value *V) {
616 Value *Current = V;
617 // ExtInsts is built in the use-def order. Therefore, we apply them to V
618 // in the reversed order.
619 for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
620 if (Constant *C = dyn_cast<Constant>(Current)) {
621 // If Current is a constant, apply s/zext using ConstantExpr::getCast.
622 // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
623 Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
624 } else {
625 Instruction *Ext = (*I)->clone();
626 Ext->setOperand(0, Current);
627 Ext->insertBefore(IP);
628 Current = Ext;
629 }
630 }
631 return Current;
632}
633
634Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
635 distributeExtsAndCloneChain(UserChain.size() - 1);
636 // Remove all nullptrs (used to be s/zext) from UserChain.
637 unsigned NewSize = 0;
638 for (User *I : UserChain) {
639 if (I != nullptr) {
640 UserChain[NewSize] = I;
641 NewSize++;
642 }
643 }
644 UserChain.resize(NewSize);
645 return removeConstOffset(UserChain.size() - 1);
646}
647
648Value *
649ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
650 User *U = UserChain[ChainIndex];
651 if (ChainIndex == 0) {
652 assert(isa<ConstantInt>(U));
653 // If U is a ConstantInt, applyExts will return a ConstantInt as well.
654 return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
655 }
656
657 if (CastInst *Cast = dyn_cast<CastInst>(U)) {
658 assert(
659 (isa<SExtInst>(Cast) || isa<ZExtInst>(Cast) || isa<TruncInst>(Cast)) &&
660 "Only following instructions can be traced: sext, zext & trunc");
661 ExtInsts.push_back(Cast);
662 UserChain[ChainIndex] = nullptr;
663 return distributeExtsAndCloneChain(ChainIndex - 1);
664 }
665
666 // Function find only traces into BinaryOperator and CastInst.
667 BinaryOperator *BO = cast<BinaryOperator>(U);
668 // OpNo = which operand of BO is UserChain[ChainIndex - 1]
669 unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
670 Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
671 Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);
672
673 BinaryOperator *NewBO = nullptr;
674 if (OpNo == 0) {
675 NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
676 BO->getName(), IP);
677 } else {
678 NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
679 BO->getName(), IP);
680 }
681 return UserChain[ChainIndex] = NewBO;
682}
683
684Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
685 if (ChainIndex == 0) {
686 assert(isa<ConstantInt>(UserChain[ChainIndex]));
687 return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
688 }
689
690 BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
691 assert(BO->getNumUses() <= 1 &&
692 "distributeExtsAndCloneChain clones each BinaryOperator in "
693 "UserChain, so no one should be used more than "
694 "once");
695
696 unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
697 assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
698 Value *NextInChain = removeConstOffset(ChainIndex - 1);
699 Value *TheOther = BO->getOperand(1 - OpNo);
700
701 // If NextInChain is 0 and not the LHS of a sub, we can simplify the
702 // sub-expression to be just TheOther.
703 if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
704 if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
705 return TheOther;
706 }
707
708 BinaryOperator::BinaryOps NewOp = BO->getOpcode();
709 if (BO->getOpcode() == Instruction::Or) {
710 // Rebuild "or" as "add", because "or" may be invalid for the new
711 // expression.
712 //
713 // For instance, given
714 // a | (b + 5) where a and b + 5 have no common bits,
715 // we can extract 5 as the constant offset.
716 //
717 // However, reusing the "or" in the new index would give us
718 // (a | b) + 5
719 // which does not equal a | (b + 5).
720 //
721 // Replacing the "or" with "add" is fine, because
722 // a | (b + 5) = a + (b + 5) = (a + b) + 5
723 NewOp = Instruction::Add;
724 }
725
726 BinaryOperator *NewBO;
727 if (OpNo == 0) {
728 NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
729 } else {
730 NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
731 }
732 NewBO->takeName(BO);
733 return NewBO;
734}
735
736Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
737 User *&UserChainTail,
738 const DominatorTree *DT) {
739 ConstantOffsetExtractor Extractor(GEP, DT);
740 // Find a non-zero constant offset first.
741 APInt ConstantOffset =
742 Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
743 GEP->isInBounds());
744 if (ConstantOffset == 0) {
745 UserChainTail = nullptr;
746 return nullptr;
747 }
748 // Separates the constant offset from the GEP index.
749 Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
750 UserChainTail = Extractor.UserChain.back();
751 return IdxWithoutConstOffset;
752}
753
754int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
755 const DominatorTree *DT) {
756 // If Idx is an index of an inbound GEP, Idx is guaranteed to be non-negative.
757 return ConstantOffsetExtractor(GEP, DT)
758 .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
759 GEP->isInBounds())
760 .getSExtValue();
761}
762
763bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
764 GetElementPtrInst *GEP) {
765 bool Changed = false;
766 Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
767 gep_type_iterator GTI = gep_type_begin(*GEP);
768 for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
769 I != E; ++I, ++GTI) {
770 // Skip struct member indices which must be i32.
771 if (GTI.isSequential()) {
772 if ((*I)->getType() != IntPtrTy) {
773 *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
774 Changed = true;
775 }
776 }
777 }
778 return Changed;
779}
780
781int64_t
782SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
783 bool &NeedsExtraction) {
784 NeedsExtraction = false;
785 int64_t AccumulativeByteOffset = 0;
786 gep_type_iterator GTI = gep_type_begin(*GEP);
787 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
788 if (GTI.isSequential()) {
789 // Tries to extract a constant offset from this GEP index.
790 int64_t ConstantOffset =
791 ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
792 if (ConstantOffset != 0) {
793 NeedsExtraction = true;
794 // A GEP may have multiple indices. We accumulate the extracted
795 // constant offset to a byte offset, and later offset the remainder of
796 // the original GEP with this byte offset.
797 AccumulativeByteOffset +=
798 ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
799 }
800 } else if (LowerGEP) {
801 StructType *StTy = GTI.getStructType();
802 uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
803 // Skip field 0 as the offset is always 0.
804 if (Field != 0) {
805 NeedsExtraction = true;
806 AccumulativeByteOffset +=
807 DL->getStructLayout(StTy)->getElementOffset(Field);
808 }
809 }
810 }
811 return AccumulativeByteOffset;
812}
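
As a worked example of the accumulation above, using the file header's float a[32][32] (values are illustrative, not taken from a real run): if the row index contributes constant 1 and the column index contributes constant 3, the indexed types are [32 x float] (alloc size 128) and float (alloc size 4), so the accumulated byte offset is 1 * 128 + 3 * 4 = 140. The same arithmetic as a tiny standalone snippet:

#include <cstdio>

int main() {
  // Hypothetical numbers matching the file header's a[32][32] example.
  long RowAllocSize = 32 * 4;   // getTypeAllocSize([32 x float])
  long ElemAllocSize = 4;       // getTypeAllocSize(float)
  long ConstInRowIdx = 1;       // constant part split from the row index
  long ConstInColIdx = 3;       // constant part split from the column index
  long AccumulativeByteOffset =
      ConstInRowIdx * RowAllocSize + ConstInColIdx * ElemAllocSize;
  std::printf("AccumulativeByteOffset = %ld\n", AccumulativeByteOffset);  // prints 140
  return 0;
}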
813
814void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
815 GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
816 IRBuilder<> Builder(Variadic);
817 Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());
818
819 Type *I8PtrTy =
820 Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
821 Value *ResultPtr = Variadic->getOperand(0);
822 Loop *L = LI->getLoopFor(Variadic->getParent());
823 // Check if the base is not loop invariant or used more than once.
824 bool isSwapCandidate =
825 L && L->isLoopInvariant(ResultPtr) &&
[22] Assuming 'L' is null
826 !hasMoreThanOneUseInLoop(ResultPtr, L);
827 Value *FirstResult = nullptr;
828
829 if (ResultPtr->getType() != I8PtrTy)
[23] Assuming the condition is false
[24] Taking false branch
830 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
831
832 gep_type_iterator GTI = gep_type_begin(*Variadic);
833 // Create an ugly GEP for each sequential index. We don't create GEPs for
834 // structure indices, as they are accumulated in the constant offset index.
835 for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
[25] Assuming 'I' is equal to 'E'
[26] Loop condition is false. Execution continues on line 863
836 if (GTI.isSequential()) {
837 Value *Idx = Variadic->getOperand(I);
838 // Skip zero indices.
839 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
840 if (CI->isZero())
841 continue;
842
843 APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
844 DL->getTypeAllocSize(GTI.getIndexedType()));
845 // Scale the index by element size.
846 if (ElementSize != 1) {
847 if (ElementSize.isPowerOf2()) {
848 Idx = Builder.CreateShl(
849 Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
850 } else {
851 Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
852 }
853 }
854 // Create an ugly GEP with a single index for each index.
855 ResultPtr =
856 Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
857 if (FirstResult == nullptr)
858 FirstResult = ResultPtr;
859 }
860 }
861
862 // Create a GEP with the constant offset index.
863 if (AccumulativeByteOffset != 0) {
[27] Assuming 'AccumulativeByteOffset' is not equal to 0
[28] Taking true branch
864 Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
865 ResultPtr =
[37] Value assigned to 'ResultPtr'
866 Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
[29] Calling 'IRBuilder::CreateGEP'
[36] Returning from 'IRBuilder::CreateGEP'
867 } else
868 isSwapCandidate = false;
869
870 // If we created a GEP with constant index, and the base is loop invariant,
871 // then we swap the first one with it, so LICM can move constant GEP out
872 // later.
873 GetElementPtrInst *FirstGEP = dyn_cast_or_null<GetElementPtrInst>(FirstResult);
[38] Assuming null pointer is passed into cast
874 GetElementPtrInst *SecondGEP = dyn_cast_or_null<GetElementPtrInst>(ResultPtr);
[39] Assuming null pointer is passed into cast
[40] Assuming pointer value is null
875 if (isSwapCandidate
[40.1] 'isSwapCandidate' is false
&& isLegalToSwapOperand(FirstGEP, SecondGEP, L))
876 swapGEPOperand(FirstGEP, SecondGEP);
877
878 if (ResultPtr->getType() != Variadic->getType())
[41] Called C++ object pointer is null
879 ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());
880
881 Variadic->replaceAllUsesWith(ResultPtr);
882 Variadic->eraseFromParent();
883}
884
885void
886SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
887 int64_t AccumulativeByteOffset) {
888 IRBuilder<> Builder(Variadic);
889 Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());
890
891 Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
892 gep_type_iterator GTI = gep_type_begin(*Variadic);
893 // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
894 // don't create arithmetics for structure indices, as they are accumulated
895 // in the constant offset index.
896 for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
897 if (GTI.isSequential()) {
898 Value *Idx = Variadic->getOperand(I);
899 // Skip zero indices.
900 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
901 if (CI->isZero())
902 continue;
903
904 APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
905 DL->getTypeAllocSize(GTI.getIndexedType()));
906 // Scale the index by element size.
907 if (ElementSize != 1) {
908 if (ElementSize.isPowerOf2()) {
909 Idx = Builder.CreateShl(
910 Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
911 } else {
912 Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
913 }
914 }
915 // Create an ADD for each index.
916 ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
917 }
918 }
919
920 // Create an ADD for the constant offset index.
921 if (AccumulativeByteOffset != 0) {
922 ResultPtr = Builder.CreateAdd(
923 ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
924 }
925
926 ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
927 Variadic->replaceAllUsesWith(ResultPtr);
928 Variadic->eraseFromParent();
929}
930
931bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
932 // Skip vector GEPs.
933 if (GEP->getType()->isVectorTy())
[9] Taking false branch
934 return false;
935
936 // The backend can already nicely handle the case where all indices are
937 // constant.
938 if (GEP->hasAllConstantIndices())
[10] Assuming the condition is false
[11] Taking false branch
939 return false;
940
941 bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);
942
943 bool NeedsExtraction;
944 int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
945
946 if (!NeedsExtraction
[11.1] 'NeedsExtraction' is true
)
[12] Taking false branch
947 return Changed;
948
949 TargetTransformInfo &TTI =
950 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*GEP->getFunction());
951
952 // If LowerGEP is disabled, before really splitting the GEP, check whether the
953 // backend supports the addressing mode we are about to produce. If no, this
954 // splitting probably won't be beneficial.
955 // If LowerGEP is enabled, even the extracted constant offset can not match
956 // the addressing mode, we can still do optimizations to other lowered parts
957 // of variable indices. Therefore, we don't check for addressing modes in that
958 // case.
959 if (!LowerGEP
[12.1] Field 'LowerGEP' is true
) {
[13] Taking false branch
960 unsigned AddrSpace = GEP->getPointerAddressSpace();
961 if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
962 /*BaseGV=*/nullptr, AccumulativeByteOffset,
963 /*HasBaseReg=*/true, /*Scale=*/0,
964 AddrSpace)) {
965 return Changed;
966 }
967 }
968
969 // Remove the constant offset in each sequential index. The resultant GEP
970 // computes the variadic base.
971 // Notice that we don't remove struct field indices here. If LowerGEP is
972 // disabled, a structure index is not accumulated and we still use the old
973 // one. If LowerGEP is enabled, a structure index is accumulated in the
974 // constant offset. LowerToSingleIndexGEPs or lowerToArithmetics will later
975 // handle the constant offset and won't need a new structure index.
976 gep_type_iterator GTI = gep_type_begin(*GEP);
977 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
[14] Loop condition is true. Entering loop body
[17] Loop condition is false. Execution continues on line 1015
978 if (GTI.isSequential()) {
[15] Assuming the condition is false
[16] Taking false branch
979 // Splits this GEP index into a variadic part and a constant offset, and
980 // uses the variadic part as the new index.
981 Value *OldIdx = GEP->getOperand(I);
982 User *UserChainTail;
983 Value *NewIdx =
984 ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
985 if (NewIdx != nullptr) {
986 // Switches to the index with the constant offset removed.
987 GEP->setOperand(I, NewIdx);
988 // After switching to the new index, we can garbage-collect UserChain
989 // and the old index if they are not used.
990 RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
991 RecursivelyDeleteTriviallyDeadInstructions(OldIdx);
992 }
993 }
994 }
995
996 // Clear the inbounds attribute because the new index may be off-bound.
997 // e.g.,
998 //
999 // b = add i64 a, 5
1000 // addr = gep inbounds float, float* p, i64 b
1001 //
1002 // is transformed to:
1003 //
1004 // addr2 = gep float, float* p, i64 a ; inbounds removed
1005 // addr = gep inbounds float, float* addr2, i64 5
1006 //
1007 // If a is -4, although the old index b is in bounds, the new index a is
1008 // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
1009 // inbounds keyword is not present, the offsets are added to the base
1010 // address with silently-wrapping two's complement arithmetic".
1011 // Therefore, the final code will be semantically equivalent.
1012 //
1013 // TODO(jingyue): do some range analysis to keep as many inbounds as
1014 // possible. GEPs with inbounds are more friendly to alias analysis.
1015 bool GEPWasInBounds = GEP->isInBounds();
1016 GEP->setIsInBounds(false);
1017
1018 // Lowers a GEP to either GEPs with a single index or arithmetic operations.
1019 if (LowerGEP
[17.1] Field 'LowerGEP' is true
) {
[18] Taking true branch
1020 // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
1021 // arithmetic operations if the target uses alias analysis in codegen.
1022 if (TTI.useAA())
[19] Assuming the condition is true
[20] Taking true branch
1023 lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
[21] Calling 'SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs'
1024 else
1025 lowerToArithmetics(GEP, AccumulativeByteOffset);
1026 return true;
1027 }
1028
1029 // No need to create another GEP if the accumulative byte offset is 0.
1030 if (AccumulativeByteOffset == 0)
1031 return true;
1032
1033 // Offsets the base with the accumulative byte offset.
1034 //
1035 // %gep ; the base
1036 // ... %gep ...
1037 //
1038 // => add the offset
1039 //
1040 // %gep2 ; clone of %gep
1041 // %new.gep = gep %gep2, <offset / sizeof(*%gep)>
1042 // %gep ; will be removed
1043 // ... %gep ...
1044 //
1045 // => replace all uses of %gep with %new.gep and remove %gep
1046 //
1047 // %gep2 ; clone of %gep
1048 // %new.gep = gep %gep2, <offset / sizeof(*%gep)>
1049 // ... %new.gep ...
1050 //
1051 // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
1052 // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
1053 // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
1054 // type of %gep.
1055 //
1056 // %gep2 ; clone of %gep
1057 // %0 = bitcast %gep2 to i8*
1058 // %uglygep = gep %0, <offset>
1059 // %new.gep = bitcast %uglygep to <type of %gep>
1060 // ... %new.gep ...
1061 Instruction *NewGEP = GEP->clone();
1062 NewGEP->insertBefore(GEP);
1063
1064 // Per ANSI C standard, signed / unsigned = unsigned and signed % unsigned =
1065 // unsigned. Therefore, we cast ElementTypeSizeOfGEP to signed because it is
1066 // used with unsigned integers later.
1067 int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
1068 DL->getTypeAllocSize(GEP->getResultElementType()));
1069 Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
1070 if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
1071 // Very likely. As long as %gep is naturally aligned, the byte offset we
1072 // extracted should be a multiple of sizeof(*%gep).
1073 int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
1074 NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
1075 ConstantInt::get(IntPtrTy, Index, true),
1076 GEP->getName(), GEP);
1077 NewGEP->copyMetadata(*GEP);
1078 // Inherit the inbounds attribute of the original GEP.
1079 cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
1080 } else {
1081 // Unlikely but possible. For example,
1082 // #pragma pack(1)
1083 // struct S {
1084 // int a[3];
1085 // int64 b[8];
1086 // };
1087 // #pragma pack()
1088 //
1089 // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
1090 // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
1091 // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
1092 // sizeof(int64).
1093 //
1094 // Emit an uglygep in this case.
1095 Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
1096 GEP->getPointerAddressSpace());
1097 NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
1098 NewGEP = GetElementPtrInst::Create(
1099 Type::getInt8Ty(GEP->getContext()), NewGEP,
1100 ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
1101 GEP);
1102 NewGEP->copyMetadata(*GEP);
1103 // Inherit the inbounds attribute of the original GEP.
1104 cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
1105 if (GEP->getType() != I8PtrTy)
1106 NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
1107 }
1108
1109 GEP->replaceAllUsesWith(NewGEP);
1110 GEP->eraseFromParent();
1111
1112 return true;
1113}
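For concreteness, the packed-struct case described in the comments above works out as follows (illustrative arithmetic only, not analyzer output):

// With #pragma pack(1), sizeof(S) = 3 * sizeof(int) + 8 * sizeof(int64)
//                                 = 12 + 64 = 76 bytes.
// For &s[i + 1].b[j + 3], AccumulativeByteOffset = 76 + 3 * 8 = 100, while
// ElementTypeSizeOfGEP = sizeof(int64) = 8. Since 100 % 8 == 4 != 0, the
// else branch above runs: bitcast to i8*, add 100 bytes, bitcast back.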
1114
1115bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
1116 if (skipFunction(F))
1
Assuming the condition is false
2
Taking false branch
1117 return false;
1118
1119 if (DisableSeparateConstOffsetFromGEP)
3
Assuming the condition is false
4
Taking false branch
1120 return false;
1121
1122 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1123 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1124 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1125 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1126 bool Changed = false;
1127 for (BasicBlock &B : F) {
1128 for (BasicBlock::iterator I = B.begin(), IE = B.end(); I != IE;)
5
Loop condition is true. Entering loop body
1129 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++))
6
Assuming 'GEP' is non-null
7
Taking true branch
1130 Changed |= splitGEP(GEP);
8
Calling 'SeparateConstOffsetFromGEP::splitGEP'
1131 // No need to split GEP ConstantExprs because all its indices are constant
1132 // already.
1133 }
1134
1135 Changed |= reuniteExts(F);
1136
1137 if (VerifyNoDeadCode)
1138 verifyNoDeadCode(F);
1139
1140 return Changed;
1141}
1142
1143Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
1144 const SCEV *Key, Instruction *Dominatee) {
1145 auto Pos = DominatingExprs.find(Key);
1146 if (Pos == DominatingExprs.end())
1147 return nullptr;
1148
1149 auto &Candidates = Pos->second;
1150 // Because we process the basic blocks in pre-order of the dominator tree, a
1151 // candidate that doesn't dominate the current instruction won't dominate any
1152 // future instruction either. Therefore, we pop it out of the stack. This
1153 // optimization makes the algorithm O(n).
1154 while (!Candidates.empty()) {
1155 Instruction *Candidate = Candidates.back();
1156 if (DT->dominates(Candidate, Dominatee))
1157 return Candidate;
1158 Candidates.pop_back();
1159 }
1160 return nullptr;
1161}
1162
1163bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
1164 if (!SE->isSCEVable(I->getType()))
1165 return false;
1166
1167 // Dom: LHS+RHS
1168 // I: sext(LHS)+sext(RHS)
1169 // If Dom can't sign overflow and Dom dominates I, optimize I to sext(Dom).
1170 // TODO: handle zext
1171 Value *LHS = nullptr, *RHS = nullptr;
1172 if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS)))) ||
1173 match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
1174 if (LHS->getType() == RHS->getType()) {
1175 const SCEV *Key =
1176 SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
1177 if (auto *Dom = findClosestMatchingDominator(Key, I)) {
1178 Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
1179 NewSExt->takeName(I);
1180 I->replaceAllUsesWith(NewSExt);
1181 RecursivelyDeleteTriviallyDeadInstructions(I);
1182 return true;
1183 }
1184 }
1185 }
1186
1187 // Add I to DominatingExprs if it's an add/sub that can't sign overflow.
1188 if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS))) ||
1189 match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
1190 if (programUndefinedIfFullPoison(I)) {
1191 const SCEV *Key =
1192 SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
1193 DominatingExprs[Key].push_back(I);
1194 }
1195 }
1196 return false;
1197}
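A minimal IR sketch of the rewrite this function performs (illustrative; assumes i32 operands widened to i64):

//   %dom = add nsw i32 %a, %b            ; recorded in DominatingExprs
//   %x   = sext i32 %a to i64
//   %y   = sext i32 %b to i64
//   %i   = add i64 %x, %y                ; matches m_Add(m_SExt, m_SExt)
// becomes, when %dom dominates %i and cannot sign-overflow,
//   %i   = sext i32 %dom to i64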
1198
1199bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
1200 bool Changed = false;
1201 DominatingExprs.clear();
1202 for (const auto Node : depth_first(DT)) {
1203 BasicBlock *BB = Node->getBlock();
1204 for (auto I = BB->begin(); I != BB->end(); ) {
1205 Instruction *Cur = &*I++;
1206 Changed |= reuniteExts(Cur);
1207 }
1208 }
1209 return Changed;
1210}
1211
1212void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
1213 for (BasicBlock &B : F) {
1214 for (Instruction &I : B) {
1215 if (isInstructionTriviallyDead(&I)) {
1216 std::string ErrMessage;
1217 raw_string_ostream RSO(ErrMessage);
1218 RSO << "Dead instruction detected!\n" << I << "\n";
1219 llvm_unreachable(RSO.str().c_str());
1220 }
1221 }
1222 }
1223}
1224
1225bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
1226 GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
1227 if (!FirstGEP || !FirstGEP->hasOneUse())
1228 return false;
1229
1230 if (!SecondGEP || FirstGEP->getParent() != SecondGEP->getParent())
1231 return false;
1232
1233 if (FirstGEP == SecondGEP)
1234 return false;
1235
1236 unsigned FirstNum = FirstGEP->getNumOperands();
1237 unsigned SecondNum = SecondGEP->getNumOperands();
1238 // Give up if the number of operands is not 2.
1239 if (FirstNum != SecondNum || FirstNum != 2)
1240 return false;
1241
1242 Value *FirstBase = FirstGEP->getOperand(0);
1243 Value *SecondBase = SecondGEP->getOperand(0);
1244 Value *FirstOffset = FirstGEP->getOperand(1);
1245 // Give up if the index of the first GEP is loop invariant.
1246 if (CurLoop->isLoopInvariant(FirstOffset))
1247 return false;
1248
1249 // Give up if the bases don't have the same type.
1250 if (FirstBase->getType() != SecondBase->getType())
1251 return false;
1252
1253 Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);
1254
1255 // Check if the second operand of the first GEP has a constant coefficient.
1256 // For example, in the following code, we won't gain anything by
1257 // hoisting the second GEP out because the second GEP can be folded away.
1258 // %scevgep.sum.ur159 = add i64 %idxprom48.ur, 256
1259 // %67 = shl i64 %scevgep.sum.ur159, 2
1260 // %uglygep160 = getelementptr i8* %65, i64 %67
1261 // %uglygep161 = getelementptr i8* %uglygep160, i64 -1024
1262
1263 // Skip a constant shift instruction that may have been generated by splitting GEPs.
1264 if (FirstOffsetDef && FirstOffsetDef->isShift() &&
1265 isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
1266 FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));
1267
1268 // Give up if FirstOffsetDef is an Add or Sub with a constant, because it
1269 // may not be profitable at all due to constant folding.
1270 if (FirstOffsetDef)
1271 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
1272 unsigned opc = BO->getOpcode();
1273 if ((opc == Instruction::Add || opc == Instruction::Sub) &&
1274 (isa<ConstantInt>(BO->getOperand(0)) ||
1275 isa<ConstantInt>(BO->getOperand(1))))
1276 return false;
1277 }
1278 return true;
1279}
1280
1281bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
1282 int UsesInLoop = 0;
1283 for (User *U : V->users()) {
1284 if (Instruction *User = dyn_cast<Instruction>(U))
1285 if (L->contains(User))
1286 if (++UsesInLoop > 1)
1287 return true;
1288 }
1289 return false;
1290}
1291
1292void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
1293 GetElementPtrInst *Second) {
1294 Value *Offset1 = First->getOperand(1);
1295 Value *Offset2 = Second->getOperand(1);
1296 First->setOperand(1, Offset2);
1297 Second->setOperand(1, Offset1);
1298
1299 // We changed p+o+c to p+c+o; p+c may not be inbounds anymore.
1300 const DataLayout &DAL = First->getModule()->getDataLayout();
1301 APInt Offset(DAL.getIndexSizeInBits(
1302 cast<PointerType>(First->getType())->getAddressSpace()),
1303 0);
1304 Value *NewBase =
1305 First->stripAndAccumulateInBoundsConstantOffsets(DAL, Offset);
1306 uint64_t ObjectSize;
1307 if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
1308 Offset.ugt(ObjectSize)) {
1309 First->setIsInBounds(false);
1310 Second->setIsInBounds(false);
1311 } else
1312 First->setIsInBounds(true);
1313}
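In IR terms, the swap above does roughly the following (illustrative; the constant 256 is arbitrary):

//   %g1 = getelementptr inbounds i8, i8* %p, i64 %o    ; variable offset
//   %g2 = getelementptr inbounds i8, i8* %g1, i64 256  ; constant offset
// becomes
//   %g1 = getelementptr i8, i8* %p, i64 256
//   %g2 = getelementptr i8, i8* %g1, i64 %o
// and inbounds is dropped from both GEPs unless %p + 256 is known, via the
// getObjectSize check above, to stay inside the underlying object.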

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/IR/BasicBlock.h"
23#include "llvm/IR/Constant.h"
24#include "llvm/IR/ConstantFolder.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/DataLayout.h"
27#include "llvm/IR/DebugLoc.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalVariable.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Instructions.h"
34#include "llvm/IR/IntrinsicInst.h"
35#include "llvm/IR/LLVMContext.h"
36#include "llvm/IR/Module.h"
37#include "llvm/IR/Operator.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Value.h"
40#include "llvm/IR/ValueHandle.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/CBindingWrapping.h"
43#include "llvm/Support/Casting.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <functional>
48#include <utility>
49
50namespace llvm {
51
52class APInt;
53class MDNode;
54class Use;
55
56/// This provides the default implementation of the IRBuilder
57/// 'InsertHelper' method that is called whenever an instruction is created by
58/// IRBuilder and needs to be inserted.
59///
60/// By default, this inserts the instruction at the insertion point.
61class IRBuilderDefaultInserter {
62protected:
63 void InsertHelper(Instruction *I, const Twine &Name,
64 BasicBlock *BB, BasicBlock::iterator InsertPt) const {
65 if (BB) BB->getInstList().insert(InsertPt, I);
66 I->setName(Name);
67 }
68};
69
70/// Provides an 'InsertHelper' that calls a user-provided callback after
71/// performing the default insertion.
72class IRBuilderCallbackInserter : IRBuilderDefaultInserter {
73 std::function<void(Instruction *)> Callback;
74
75public:
76 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
77 : Callback(std::move(Callback)) {}
78
79protected:
80 void InsertHelper(Instruction *I, const Twine &Name,
81 BasicBlock *BB, BasicBlock::iterator InsertPt) const {
82 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
83 Callback(I);
84 }
85};
86
87/// Common base class shared among various IRBuilders.
88class IRBuilderBase {
89 DebugLoc CurDbgLocation;
90
91protected:
92 BasicBlock *BB;
93 BasicBlock::iterator InsertPt;
94 LLVMContext &Context;
95
96 MDNode *DefaultFPMathTag;
97 FastMathFlags FMF;
98
99 bool IsFPConstrained;
100 fp::ExceptionBehavior DefaultConstrainedExcept;
101 fp::RoundingMode DefaultConstrainedRounding;
102
103 ArrayRef<OperandBundleDef> DefaultOperandBundles;
104
105public:
106 IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr,
107 ArrayRef<OperandBundleDef> OpBundles = None)
108 : Context(context), DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
109 DefaultConstrainedExcept(fp::ebStrict),
110 DefaultConstrainedRounding(fp::rmDynamic),
111 DefaultOperandBundles(OpBundles) {
112 ClearInsertionPoint();
113 }
114
115 //===--------------------------------------------------------------------===//
116 // Builder configuration methods
117 //===--------------------------------------------------------------------===//
118
119 /// Clear the insertion point: created instructions will not be
120 /// inserted into a block.
121 void ClearInsertionPoint() {
122 BB = nullptr;
123 InsertPt = BasicBlock::iterator();
124 }
125
126 BasicBlock *GetInsertBlock() const { return BB; }
127 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
128 LLVMContext &getContext() const { return Context; }
129
130 /// This specifies that created instructions should be appended to the
131 /// end of the specified block.
132 void SetInsertPoint(BasicBlock *TheBB) {
133 BB = TheBB;
134 InsertPt = BB->end();
135 }
136
137 /// This specifies that created instructions should be inserted before
138 /// the specified instruction.
139 void SetInsertPoint(Instruction *I) {
140 BB = I->getParent();
141 InsertPt = I->getIterator();
142 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
143 SetCurrentDebugLocation(I->getDebugLoc());
144 }
145
146 /// This specifies that created instructions should be inserted at the
147 /// specified point.
148 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
149 BB = TheBB;
150 InsertPt = IP;
151 if (IP != TheBB->end())
152 SetCurrentDebugLocation(IP->getDebugLoc());
153 }
154
155 /// Set location information used by debugging information.
156 void SetCurrentDebugLocation(DebugLoc L) { CurDbgLocation = std::move(L); }
157
158 /// Get location information used by debugging information.
159 const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
160
161 /// If this builder has a current debug location, set it on the
162 /// specified instruction.
163 void SetInstDebugLocation(Instruction *I) const {
164 if (CurDbgLocation)
165 I->setDebugLoc(CurDbgLocation);
166 }
167
168 /// Get the return type of the current function that we're emitting
169 /// into.
170 Type *getCurrentFunctionReturnType() const;
171
172 /// InsertPoint - A saved insertion point.
173 class InsertPoint {
174 BasicBlock *Block = nullptr;
175 BasicBlock::iterator Point;
176
177 public:
178 /// Creates a new insertion point which doesn't point to anything.
179 InsertPoint() = default;
180
181 /// Creates a new insertion point at the given location.
182 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
183 : Block(InsertBlock), Point(InsertPoint) {}
184
185 /// Returns true if this insert point is set.
186 bool isSet() const { return (Block != nullptr); }
187
188 BasicBlock *getBlock() const { return Block; }
189 BasicBlock::iterator getPoint() const { return Point; }
190 };
191
192 /// Returns the current insert point.
193 InsertPoint saveIP() const {
194 return InsertPoint(GetInsertBlock(), GetInsertPoint());
195 }
196
197 /// Returns the current insert point, clearing it in the process.
198 InsertPoint saveAndClearIP() {
199 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
200 ClearInsertionPoint();
201 return IP;
202 }
203
204 /// Sets the current insert point to a previously-saved location.
205 void restoreIP(InsertPoint IP) {
206 if (IP.isSet())
207 SetInsertPoint(IP.getBlock(), IP.getPoint());
208 else
209 ClearInsertionPoint();
210 }
211
212 /// Get the floating point math metadata being used.
213 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
214
215 /// Get the flags to be applied to created floating point ops
216 FastMathFlags getFastMathFlags() const { return FMF; }
217
218 /// Clear the fast-math flags.
219 void clearFastMathFlags() { FMF.clear(); }
220
221 /// Set the floating point math metadata to be used.
222 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
223
224 /// Set the fast-math flags to be used with generated fp-math operators
225 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
226
227 /// Enable/Disable use of constrained floating point math. When
228 /// enabled the CreateF<op>() calls instead create constrained
229 /// floating point intrinsic calls. Fast math flags are unaffected
230 /// by this setting.
231 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
232
233 /// Query for the use of constrained floating point math
234 bool getIsFPConstrained() { return IsFPConstrained; }
235
236 /// Set the exception handling to be used with constrained floating point
237 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
238 DefaultConstrainedExcept = NewExcept;
239 }
240
241 /// Set the rounding mode handling to be used with constrained floating point
242 void setDefaultConstrainedRounding(fp::RoundingMode NewRounding) {
243 DefaultConstrainedRounding = NewRounding;
244 }
245
246 /// Get the exception handling used with constrained floating point
247 fp::ExceptionBehavior getDefaultConstrainedExcept() {
248 return DefaultConstrainedExcept;
249 }
250
251 /// Get the rounding mode handling used with constrained floating point
252 fp::RoundingMode getDefaultConstrainedRounding() {
253 return DefaultConstrainedRounding;
254 }
255
256 void setConstrainedFPFunctionAttr() {
257 assert(BB && "Must have a basic block to set any function attributes!");
258
259 Function *F = BB->getParent();
260 if (!F->hasFnAttribute(Attribute::StrictFP)) {
261 F->addFnAttr(Attribute::StrictFP);
262 }
263 }
264
265 void setConstrainedFPCallAttr(CallInst *I) {
266 if (!I->hasFnAttr(Attribute::StrictFP))
267 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
268 }
269
270 //===--------------------------------------------------------------------===//
271 // RAII helpers.
272 //===--------------------------------------------------------------------===//
273
274 // RAII object that stores the current insertion point and restores it
275 // when the object is destroyed. This includes the debug location.
276 class InsertPointGuard {
277 IRBuilderBase &Builder;
278 AssertingVH<BasicBlock> Block;
279 BasicBlock::iterator Point;
280 DebugLoc DbgLoc;
281
282 public:
283 InsertPointGuard(IRBuilderBase &B)
284 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
285 DbgLoc(B.getCurrentDebugLocation()) {}
286
287 InsertPointGuard(const InsertPointGuard &) = delete;
288 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
289
290 ~InsertPointGuard() {
291 Builder.restoreIP(InsertPoint(Block, Point));
292 Builder.SetCurrentDebugLocation(DbgLoc);
293 }
294 };
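A typical use of the guard (illustrative sketch; Builder and Preheader are assumed to be defined by the caller):

//   {
//     IRBuilderBase::InsertPointGuard Guard(Builder);
//     Builder.SetInsertPoint(Preheader->getTerminator());
//     ...  // emit instructions into the preheader
//   }      // insertion point and debug location restored here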
295
296 // RAII object that stores the current fast math settings and restores
297 // them when the object is destroyed.
298 class FastMathFlagGuard {
299 IRBuilderBase &Builder;
300 FastMathFlags FMF;
301 MDNode *FPMathTag;
302
303 public:
304 FastMathFlagGuard(IRBuilderBase &B)
305 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}
306
307 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
308 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
309
310 ~FastMathFlagGuard() {
311 Builder.FMF = FMF;
312 Builder.DefaultFPMathTag = FPMathTag;
313 }
314 };
315
316 //===--------------------------------------------------------------------===//
317 // Miscellaneous creation methods.
318 //===--------------------------------------------------------------------===//
319
320 /// Make a new global variable with initializer type i8*
321 ///
322 /// Make a new global variable with an initializer that has array of i8 type
323 /// filled in with the null terminated string value specified. The new global
324 /// variable will be marked mergable with any others of the same contents. If
325 /// Name is specified, it is the name of the global variable created.
326 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
327 unsigned AddressSpace = 0);
328
329 /// Get a constant value representing either true or false.
330 ConstantInt *getInt1(bool V) {
331 return ConstantInt::get(getInt1Ty(), V);
332 }
333
334 /// Get the constant value for i1 true.
335 ConstantInt *getTrue() {
336 return ConstantInt::getTrue(Context);
337 }
338
339 /// Get the constant value for i1 false.
340 ConstantInt *getFalse() {
341 return ConstantInt::getFalse(Context);
342 }
343
344 /// Get a constant 8-bit value.
345 ConstantInt *getInt8(uint8_t C) {
346 return ConstantInt::get(getInt8Ty(), C);
347 }
348
349 /// Get a constant 16-bit value.
350 ConstantInt *getInt16(uint16_t C) {
351 return ConstantInt::get(getInt16Ty(), C);
352 }
353
354 /// Get a constant 32-bit value.
355 ConstantInt *getInt32(uint32_t C) {
356 return ConstantInt::get(getInt32Ty(), C);
357 }
358
359 /// Get a constant 64-bit value.
360 ConstantInt *getInt64(uint64_t C) {
361 return ConstantInt::get(getInt64Ty(), C);
362 }
363
364 /// Get a constant N-bit value, zero extended or truncated from
365 /// a 64-bit value.
366 ConstantInt *getIntN(unsigned N, uint64_t C) {
367 return ConstantInt::get(getIntNTy(N), C);
368 }
369
370 /// Get a constant integer value.
371 ConstantInt *getInt(const APInt &AI) {
372 return ConstantInt::get(Context, AI);
373 }
374
375 //===--------------------------------------------------------------------===//
376 // Type creation methods
377 //===--------------------------------------------------------------------===//
378
379 /// Fetch the type representing a single bit
380 IntegerType *getInt1Ty() {
381 return Type::getInt1Ty(Context);
382 }
383
384 /// Fetch the type representing an 8-bit integer.
385 IntegerType *getInt8Ty() {
386 return Type::getInt8Ty(Context);
387 }
388
389 /// Fetch the type representing a 16-bit integer.
390 IntegerType *getInt16Ty() {
391 return Type::getInt16Ty(Context);
392 }
393
394 /// Fetch the type representing a 32-bit integer.
395 IntegerType *getInt32Ty() {
396 return Type::getInt32Ty(Context);
397 }
398
399 /// Fetch the type representing a 64-bit integer.
400 IntegerType *getInt64Ty() {
401 return Type::getInt64Ty(Context);
402 }
403
404 /// Fetch the type representing a 128-bit integer.
405 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
406
407 /// Fetch the type representing an N-bit integer.
408 IntegerType *getIntNTy(unsigned N) {
409 return Type::getIntNTy(Context, N);
410 }
411
412 /// Fetch the type representing a 16-bit floating point value.
413 Type *getHalfTy() {
414 return Type::getHalfTy(Context);
415 }
416
417 /// Fetch the type representing a 32-bit floating point value.
418 Type *getFloatTy() {
419 return Type::getFloatTy(Context);
420 }
421
422 /// Fetch the type representing a 64-bit floating point value.
423 Type *getDoubleTy() {
424 return Type::getDoubleTy(Context);
425 }
426
427 /// Fetch the type representing void.
428 Type *getVoidTy() {
429 return Type::getVoidTy(Context);
430 }
431
432 /// Fetch the type representing a pointer to an 8-bit integer value.
433 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
434 return Type::getInt8PtrTy(Context, AddrSpace);
435 }
436
437 /// Fetch the type representing a pointer to an integer value.
438 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
439 return DL.getIntPtrType(Context, AddrSpace);
440 }
441
442 //===--------------------------------------------------------------------===//
443 // Intrinsic creation methods
444 //===--------------------------------------------------------------------===//
445
446 /// Create and insert a memset to the specified pointer and the
447 /// specified value.
448 ///
449 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
450 /// specified, it will be added to the instruction. Likewise with alias.scope
451 /// and noalias tags.
452 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
453 MaybeAlign Align, bool isVolatile = false,
454 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
455 MDNode *NoAliasTag = nullptr) {
456 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
457 TBAATag, ScopeTag, NoAliasTag);
458 }
459
460 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
461 bool isVolatile = false, MDNode *TBAATag = nullptr,
462 MDNode *ScopeTag = nullptr,
463 MDNode *NoAliasTag = nullptr);
464
465 /// Create and insert an element unordered-atomic memset of the region of
466 /// memory starting at the given pointer to the given value.
467 ///
468 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
469 /// specified, it will be added to the instruction. Likewise with alias.scope
470 /// and noalias tags.
471 /// FIXME: Remove this function once transition to Align is over.
472 /// Use the version that takes Align instead of this one.
473 LLVM_ATTRIBUTE_DEPRECATED(
474 CallInst *CreateElementUnorderedAtomicMemSet(
475 Value *Ptr, Value *Val, uint64_t Size, unsigned Alignment,
476 uint32_t ElementSize, MDNode *TBAATag = nullptr,
477 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
478 "Use the version that takes Align instead of this one") {
479 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
480 Align(Alignment), ElementSize,
481 TBAATag, ScopeTag, NoAliasTag);
482 }
483
484 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
485 uint64_t Size, Align Alignment,
486 uint32_t ElementSize,
487 MDNode *TBAATag = nullptr,
488 MDNode *ScopeTag = nullptr,
489 MDNode *NoAliasTag = nullptr) {
490 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
491 Align(Alignment), ElementSize,
492 TBAATag, ScopeTag, NoAliasTag);
493 }
494
495 /// FIXME: Remove this function once transition to Align is over.
496 /// Use the version that takes Align instead of this one.
497 LLVM_ATTRIBUTE_DEPRECATED(
498 CallInst *CreateElementUnorderedAtomicMemSet(
499 Value *Ptr, Value *Val, Value *Size, unsigned Alignment,
500 uint32_t ElementSize, MDNode *TBAATag = nullptr,
501 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
502 "Use the version that takes Align instead of this one") {
503 return CreateElementUnorderedAtomicMemSet(Ptr, Val, Size, Align(Alignment),
504 ElementSize, TBAATag, ScopeTag,
505 NoAliasTag);
506 }
507
508 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
509 Value *Size, Align Alignment,
510 uint32_t ElementSize,
511 MDNode *TBAATag = nullptr,
512 MDNode *ScopeTag = nullptr,
513 MDNode *NoAliasTag = nullptr);
514
515 /// Create and insert a memcpy between the specified pointers.
516 ///
517 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
518 /// specified, it will be added to the instruction. Likewise with alias.scope
519 /// and noalias tags.
520 /// FIXME: Remove this function once transition to Align is over.
521 /// Use the version that takes MaybeAlign instead of this one.
522 LLVM_ATTRIBUTE_DEPRECATED(
523 CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
524 unsigned SrcAlign, uint64_t Size,
525 bool isVolatile = false, MDNode *TBAATag = nullptr,
526 MDNode *TBAAStructTag = nullptr,
527 MDNode *ScopeTag = nullptr,
528 MDNode *NoAliasTag = nullptr),
529 "Use the version that takes MaybeAlign instead") {
530 return CreateMemCpy(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
531 getInt64(Size), isVolatile, TBAATag, TBAAStructTag,
532 ScopeTag, NoAliasTag);
533 }
534
535 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
536 MaybeAlign SrcAlign, uint64_t Size,
537 bool isVolatile = false, MDNode *TBAATag = nullptr,
538 MDNode *TBAAStructTag = nullptr,
539 MDNode *ScopeTag = nullptr,
540 MDNode *NoAliasTag = nullptr) {
541 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
542 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
543 NoAliasTag);
544 }
545
546 /// FIXME: Remove this function once transition to Align is over.
547 /// Use the version that takes MaybeAlign instead of this one.
548 LLVM_ATTRIBUTE_DEPRECATED(
549 CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
550 unsigned SrcAlign, Value *Size,
551 bool isVolatile = false, MDNode *TBAATag = nullptr,
552 MDNode *TBAAStructTag = nullptr,
553 MDNode *ScopeTag = nullptr,
554 MDNode *NoAliasTag = nullptr),
555 "Use the version that takes MaybeAlign instead");
556 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
557 MaybeAlign SrcAlign, Value *Size,
558 bool isVolatile = false, MDNode *TBAATag = nullptr,
559 MDNode *TBAAStructTag = nullptr,
560 MDNode *ScopeTag = nullptr,
561 MDNode *NoAliasTag = nullptr);
562
563 /// Create and insert an element unordered-atomic memcpy between the
564 /// specified pointers.
565 ///
566 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
567 ///
568 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
569 /// specified, it will be added to the instruction. Likewise with alias.scope
570 /// and noalias tags.
571 CallInst *CreateElementUnorderedAtomicMemCpy(
572 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
573 uint64_t Size, uint32_t ElementSize, MDNode *TBAATag = nullptr,
574 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
575 MDNode *NoAliasTag = nullptr) {
576 return CreateElementUnorderedAtomicMemCpy(
577 Dst, DstAlign, Src, SrcAlign, getInt64(Size), ElementSize, TBAATag,
578 TBAAStructTag, ScopeTag, NoAliasTag);
579 }
580
581 CallInst *CreateElementUnorderedAtomicMemCpy(
582 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
583 uint32_t ElementSize, MDNode *TBAATag = nullptr,
584 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
585 MDNode *NoAliasTag = nullptr);
586
587 /// Create and insert a memmove between the specified
588 /// pointers.
589 ///
590 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
591 /// specified, it will be added to the instruction. Likewise with alias.scope
592 /// and noalias tags.
593 /// FIXME: Remove this function once transition to Align is over.
594 /// Use the version that takes MaybeAlign instead of this one.
595 LLVM_ATTRIBUTE_DEPRECATED(
596 CallInst *CreateMemMove(
597 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
598 uint64_t Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
599 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
600 "Use the version that takes MaybeAlign") {
601 return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
602 getInt64(Size), isVolatile, TBAATag, ScopeTag,
603 NoAliasTag);
604 }
605 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
606 MaybeAlign SrcAlign, uint64_t Size,
607 bool isVolatile = false, MDNode *TBAATag = nullptr,
608 MDNode *ScopeTag = nullptr,
609 MDNode *NoAliasTag = nullptr) {
610 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
611 isVolatile, TBAATag, ScopeTag, NoAliasTag);
612 }
613 /// FIXME: Remove this function once transition to Align is over.
614 /// Use the version that takes MaybeAlign instead of this one.
615 LLVM_ATTRIBUTE_DEPRECATED(
616 CallInst *CreateMemMove(
617 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
618 Value *Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
619 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
620 "Use the version that takes MaybeAlign") {
621 return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
622 Size, isVolatile, TBAATag, ScopeTag, NoAliasTag);
623 }
624 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
625 MaybeAlign SrcAlign, Value *Size,
626 bool isVolatile = false, MDNode *TBAATag = nullptr,
627 MDNode *ScopeTag = nullptr,
628 MDNode *NoAliasTag = nullptr);
629
630 /// \brief Create and insert an element unordered-atomic memmove between the
631 /// specified pointers.
632 ///
633 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
634 /// respectively.
635 ///
636 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
637 /// specified, it will be added to the instruction. Likewise with alias.scope
638 /// and noalias tags.
639 CallInst *CreateElementUnorderedAtomicMemMove(
640 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
641 uint64_t Size, uint32_t ElementSize, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
643 MDNode *NoAliasTag = nullptr) {
644 return CreateElementUnorderedAtomicMemMove(
645 Dst, DstAlign, Src, SrcAlign, getInt64(Size), ElementSize, TBAATag,
646 TBAAStructTag, ScopeTag, NoAliasTag);
647 }
648
649 CallInst *CreateElementUnorderedAtomicMemMove(
650 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
651 uint32_t ElementSize, MDNode *TBAATag = nullptr,
652 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
653 MDNode *NoAliasTag = nullptr);
654
655 /// Create a vector fadd reduction intrinsic of the source vector.
656 /// The first parameter is a scalar accumulator value for ordered reductions.
657 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
658
659 /// Create a vector fmul reduction intrinsic of the source vector.
660 /// The first parameter is a scalar accumulator value for ordered reductions.
661 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
662
663 /// Create a vector int add reduction intrinsic of the source vector.
664 CallInst *CreateAddReduce(Value *Src);
665
666 /// Create a vector int mul reduction intrinsic of the source vector.
667 CallInst *CreateMulReduce(Value *Src);
668
669 /// Create a vector int AND reduction intrinsic of the source vector.
670 CallInst *CreateAndReduce(Value *Src);
671
672 /// Create a vector int OR reduction intrinsic of the source vector.
673 CallInst *CreateOrReduce(Value *Src);
674
675 /// Create a vector int XOR reduction intrinsic of the source vector.
676 CallInst *CreateXorReduce(Value *Src);
677
678 /// Create a vector integer max reduction intrinsic of the source
679 /// vector.
680 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
681
682 /// Create a vector integer min reduction intrinsic of the source
683 /// vector.
684 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
685
686 /// Create a vector float max reduction intrinsic of the source
687 /// vector.
688 CallInst *CreateFPMaxReduce(Value *Src, bool NoNaN = false);
689
690 /// Create a vector float min reduction intrinsic of the source
691 /// vector.
692 CallInst *CreateFPMinReduce(Value *Src, bool NoNaN = false);
693
694 /// Create a lifetime.start intrinsic.
695 ///
696 /// If the pointer isn't i8* it will be converted.
697 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
698
699 /// Create a lifetime.end intrinsic.
700 ///
701 /// If the pointer isn't i8* it will be converted.
702 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
703
704 /// Create a call to invariant.start intrinsic.
705 ///
706 /// If the pointer isn't i8* it will be converted.
707 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
708
709 /// Create a call to Masked Load intrinsic
710 CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
711 Value *PassThru = nullptr, const Twine &Name = "");
712
713 /// Create a call to Masked Store intrinsic
714 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
715 Value *Mask);
716
717 /// Create a call to Masked Gather intrinsic
718 CallInst *CreateMaskedGather(Value *Ptrs, unsigned Align,
719 Value *Mask = nullptr,
720 Value *PassThru = nullptr,
721 const Twine& Name = "");
722
723 /// Create a call to Masked Scatter intrinsic
724 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Align,
725 Value *Mask = nullptr);
726
727 /// Create an assume intrinsic call that allows the optimizer to
728 /// assume that the provided condition will be true.
729 CallInst *CreateAssumption(Value *Cond);
730
731 /// Create a call to the experimental.gc.statepoint intrinsic to
732 /// start a new statepoint sequence.
733 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
734 Value *ActualCallee,
735 ArrayRef<Value *> CallArgs,
736 ArrayRef<Value *> DeoptArgs,
737 ArrayRef<Value *> GCArgs,
738 const Twine &Name = "");
739
740 /// Create a call to the experimental.gc.statepoint intrinsic to
741 /// start a new statepoint sequence.
742 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
743 Value *ActualCallee, uint32_t Flags,
744 ArrayRef<Use> CallArgs,
745 ArrayRef<Use> TransitionArgs,
746 ArrayRef<Use> DeoptArgs,
747 ArrayRef<Value *> GCArgs,
748 const Twine &Name = "");
749
750 /// Convenience function for the common case when CallArgs are filled
751 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
752 /// .get()'ed to get the Value pointer.
753 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
754 Value *ActualCallee, ArrayRef<Use> CallArgs,
755 ArrayRef<Value *> DeoptArgs,
756 ArrayRef<Value *> GCArgs,
757 const Twine &Name = "");
758
759 /// Create an invoke to the experimental.gc.statepoint intrinsic to
760 /// start a new statepoint sequence.
761 InvokeInst *
762 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
763 Value *ActualInvokee, BasicBlock *NormalDest,
764 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
765 ArrayRef<Value *> DeoptArgs,
766 ArrayRef<Value *> GCArgs, const Twine &Name = "");
767
768 /// Create an invoke to the experimental.gc.statepoint intrinsic to
769 /// start a new statepoint sequence.
770 InvokeInst *CreateGCStatepointInvoke(
771 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
772 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
773 ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
774 ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs,
775 const Twine &Name = "");
776
777 // Convenience function for the common case when CallArgs are filled in using
778 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
779 // get the Value *.
780 InvokeInst *
781 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
782 Value *ActualInvokee, BasicBlock *NormalDest,
783 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
784 ArrayRef<Value *> DeoptArgs,
785 ArrayRef<Value *> GCArgs, const Twine &Name = "");
786
787 /// Create a call to the experimental.gc.result intrinsic to extract
788 /// the result from a call wrapped in a statepoint.
789 CallInst *CreateGCResult(Instruction *Statepoint,
790 Type *ResultType,
791 const Twine &Name = "");
792
793 /// Create a call to the experimental.gc.relocate intrinsics to
794 /// project the relocated value of one pointer from the statepoint.
795 CallInst *CreateGCRelocate(Instruction *Statepoint,
796 int BaseOffset,
797 int DerivedOffset,
798 Type *ResultType,
799 const Twine &Name = "");
800
801 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
802 /// type.
803 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
804 Instruction *FMFSource = nullptr,
805 const Twine &Name = "");
806
807 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
808 /// first type.
809 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
810 Instruction *FMFSource = nullptr,
811 const Twine &Name = "");
812
813 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
814 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
815 /// the intrinsic.
816 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
817 ArrayRef<Value *> Args,
818 Instruction *FMFSource = nullptr,
819 const Twine &Name = "");
820
821 /// Create call to the minnum intrinsic.
822 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
823 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
824 }
825
826 /// Create call to the maxnum intrinsic.
827 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
828 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
829 }
830
831 /// Create call to the minimum intrinsic.
832 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
833 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
834 }
835
836 /// Create call to the maximum intrinsic.
837 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
838 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
839 }
840
841private:
842 /// Create a call to a masked intrinsic with given Id.
843 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
844 ArrayRef<Type *> OverloadedTypes,
845 const Twine &Name = "");
846
847 Value *getCastedInt8PtrValue(Value *Ptr);
848};
849
850/// This provides a uniform API for creating instructions and inserting
851/// them into a basic block: either at the end of a BasicBlock, or at a specific
852/// iterator location in a block.
853///
854/// Note that the builder does not expose the full generality of LLVM
855/// instructions. For access to extra instruction properties, use the mutators
856/// (e.g. setVolatile) on the instructions after they have been
857/// created. Convenience state exists to specify fast-math flags and fp-math
858/// tags.
859///
860/// The first template argument specifies a class to use for creating constants.
861/// This defaults to creating minimally folded constants. The second template
862/// argument allows clients to specify custom insertion hooks that are called on
863/// every newly created insertion.
864template <typename T = ConstantFolder,
865 typename Inserter = IRBuilderDefaultInserter>
866class IRBuilder : public IRBuilderBase, public Inserter {
867 T Folder;
868
869public:
870 IRBuilder(LLVMContext &C, const T &F, Inserter I = Inserter(),
871 MDNode *FPMathTag = nullptr,
872 ArrayRef<OperandBundleDef> OpBundles = None)
873 : IRBuilderBase(C, FPMathTag, OpBundles), Inserter(std::move(I)),
874 Folder(F) {}
875
876 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
877 ArrayRef<OperandBundleDef> OpBundles = None)
878 : IRBuilderBase(C, FPMathTag, OpBundles) {}
879
880 explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr,
881 ArrayRef<OperandBundleDef> OpBundles = None)
882 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
883 SetInsertPoint(TheBB);
884 }
885
886 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
887 ArrayRef<OperandBundleDef> OpBundles = None)
888 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles) {
889 SetInsertPoint(TheBB);
890 }
891
892 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
893 ArrayRef<OperandBundleDef> OpBundles = None)
894 : IRBuilderBase(IP->getContext(), FPMathTag, OpBundles) {
895 SetInsertPoint(IP);
896 }
897
898 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T &F,
899 MDNode *FPMathTag = nullptr,
900 ArrayRef<OperandBundleDef> OpBundles = None)
901 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
902 SetInsertPoint(TheBB, IP);
903 }
904
905 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
906 MDNode *FPMathTag = nullptr,
907 ArrayRef<OperandBundleDef> OpBundles = None)
908 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles) {
909 SetInsertPoint(TheBB, IP);
910 }
911
912 /// Get the constant folder being used.
913 const T &getFolder() { return Folder; }
914
915 /// Insert and return the specified instruction.
916 template<typename InstTy>
917 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
918 this->InsertHelper(I, Name, BB, InsertPt);
919 this->SetInstDebugLocation(I);
920 return I;
921 }
922
923 /// No-op overload to handle constants.
924 Constant *Insert(Constant *C, const Twine& = "") const {
925 return C;
926 }
927
928 //===--------------------------------------------------------------------===//
929 // Instruction creation methods: Terminators
930 //===--------------------------------------------------------------------===//
931
932private:
933 /// Helper to add branch weight and unpredictable metadata onto an
934 /// instruction.
935 /// \returns The annotated instruction.
936 template <typename InstTy>
937 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
938 if (Weights)
939 I->setMetadata(LLVMContext::MD_prof, Weights);
940 if (Unpredictable)
941 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
942 return I;
943 }
944
945public:
946 /// Create a 'ret void' instruction.
947 ReturnInst *CreateRetVoid() {
948 return Insert(ReturnInst::Create(Context));
949 }
950
951 /// Create a 'ret <val>' instruction.
952 ReturnInst *CreateRet(Value *V) {
953 return Insert(ReturnInst::Create(Context, V));
954 }
955
956/// Create a sequence of N insertvalue instructions,
957/// one per Value in the retVals array, that build an aggregate
958/// return value one element at a time, followed by a ret instruction to
959/// return the resulting aggregate value.
960 ///
961 /// This is a convenience function for code that uses aggregate return values
962 /// as a vehicle for having multiple return values.
963 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
964 Value *V = UndefValue::get(getCurrentFunctionReturnType());
965 for (unsigned i = 0; i != N; ++i)
966 V = CreateInsertValue(V, retVals[i], i, "mrv");
967 return Insert(ReturnInst::Create(Context, V));
968 }
969
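An illustrative sketch (an assumption, not from the header) of CreateAggregateRet, assuming the current function returns a two-field struct and that A and B are hypothetical Values of the corresponding field types.

    // Sketch: chains two insertvalue instructions into an undef aggregate,
    // then emits the ret of the completed value.
    llvm::Value *RetVals[] = {A, B};
    Builder.CreateAggregateRet(RetVals, 2);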
970 /// Create an unconditional 'br label X' instruction.
971 BranchInst *CreateBr(BasicBlock *Dest) {
972 return Insert(BranchInst::Create(Dest));
973 }
974
975 /// Create a conditional 'br Cond, TrueDest, FalseDest'
976 /// instruction.
977 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
978 MDNode *BranchWeights = nullptr,
979 MDNode *Unpredictable = nullptr) {
980 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
981 BranchWeights, Unpredictable));
982 }
983
984 /// Create a conditional 'br Cond, TrueDest, FalseDest'
985/// instruction. Copy branch metadata if available.
986 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
987 Instruction *MDSrc) {
988 BranchInst *Br = BranchInst::Create(True, False, Cond);
989 if (MDSrc) {
990 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
991 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
992 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
993 }
994 return Insert(Br);
995 }
996
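For illustration (an assumption, not part of the header): the metadata-copying overload above is convenient when an existing conditional branch is being rebuilt; OldBr, NewCond, TrueBB, and FalseBB below are hypothetical names.

    // Sketch: recreate a conditional branch while preserving prof,
    // unpredictable, make.implicit, and dbg metadata from the old branch.
    llvm::BranchInst *NewBr =
        Builder.CreateCondBr(NewCond, TrueBB, FalseBB, /*MDSrc=*/OldBr);
    OldBr->eraseFromParent();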
997 /// Create a switch instruction with the specified value, default dest,
998 /// and with a hint for the number of cases that will be added (for efficient
999 /// allocation).
1000 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1001 MDNode *BranchWeights = nullptr,
1002 MDNode *Unpredictable = nullptr) {
1003 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1004 BranchWeights, Unpredictable));
1005 }
1006
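Illustrative aside (not from the header): NumCases is only an allocation hint; the cases themselves are added afterwards. A minimal sketch, assuming a hypothetical i32 selector Sel, default block DefaultBB, and an array CaseBBs of destination blocks.

    // Sketch: create the switch with a case-count hint, then attach cases.
    llvm::SwitchInst *SI = Builder.CreateSwitch(Sel, DefaultBB, /*NumCases=*/3);
    for (unsigned i = 0; i < 3; ++i)
      SI->addCase(Builder.getInt32(i), CaseBBs[i]);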
1007 /// Create an indirect branch instruction with the specified address
1008 /// operand, with an optional hint for the number of destinations that will be
1009 /// added (for efficient allocation).
1010 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1011 return Insert(IndirectBrInst::Create(Addr, NumDests));
1012 }
1013
1014 /// Create an invoke instruction.
1015 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1016 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1017 ArrayRef<Value *> Args,
1018 ArrayRef<OperandBundleDef> OpBundles,
1019 const Twine &Name = "") {
1020 return Insert(
1021 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles),
1022 Name);
1023 }
1024 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1025 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1026 ArrayRef<Value *> Args = None,
1027 const Twine &Name = "") {
1028 return Insert(InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args),
1029 Name);
1030 }
1031
1032 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1033 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1034 ArrayRef<OperandBundleDef> OpBundles,
1035 const Twine &Name = "") {
1036 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1037 NormalDest, UnwindDest, Args, OpBundles, Name);
1038 }
1039
1040 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1041 BasicBlock *UnwindDest,
1042 ArrayRef<Value *> Args = None,
1043 const Twine &Name = "") {
1044 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1045 NormalDest, UnwindDest, Args, Name);
1046 }
1047
1048 // Deprecated [opaque pointer types]
1049 InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
1050 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1051 ArrayRef<OperandBundleDef> OpBundles,
1052 const Twine &Name = "") {
1053 return CreateInvoke(
1054 cast<FunctionType>(
1055 cast<PointerType>(Callee->getType())->getElementType()),
1056 Callee, NormalDest, UnwindDest, Args, OpBundles, Name);
1057 }
1058
1059 // Deprecated [opaque pointer types]
1060 InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
1061 BasicBlock *UnwindDest,
1062 ArrayRef<Value *> Args = None,
1063 const Twine &Name = "") {
1064 return CreateInvoke(
1065 cast<FunctionType>(
1066 cast<PointerType>(Callee->getType())->getElementType()),
1067 Callee, NormalDest, UnwindDest, Args, Name);
1068 }
1069
1070 /// \brief Create a callbr instruction.
1071 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1072 BasicBlock *DefaultDest,
1073 ArrayRef<BasicBlock *> IndirectDests,
1074 ArrayRef<Value *> Args = None,
1075 const Twine &Name = "") {
1076 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1077 Args), Name);
1078 }
1079 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1080 BasicBlock *DefaultDest,
1081 ArrayRef<BasicBlock *> IndirectDests,
1082 ArrayRef<Value *> Args,
1083 ArrayRef<OperandBundleDef> OpBundles,
1084 const Twine &Name = "") {
1085 return Insert(
1086 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1087 OpBundles), Name);
1088 }
1089
1090 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1091 ArrayRef<BasicBlock *> IndirectDests,
1092 ArrayRef<Value *> Args = None,
1093 const Twine &Name = "") {
1094 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1095 DefaultDest, IndirectDests, Args, Name);
1096 }
1097 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1098 ArrayRef<BasicBlock *> IndirectDests,
1099 ArrayRef<Value *> Args,
1100 ArrayRef<OperandBundleDef> OpBundles,
1101 const Twine &Name = "") {
1102 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1103                        DefaultDest, IndirectDests, Args, OpBundles, Name);
1104 }
1105
1106 ResumeInst *CreateResume(Value *Exn) {
1107 return Insert(ResumeInst::Create(Exn));
1108 }
1109
1110 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1111 BasicBlock *UnwindBB = nullptr) {
1112 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1113 }
1114
1115 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1116 unsigned NumHandlers,
1117 const Twine &Name = "") {
1118 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1119 Name);
1120 }
1121
1122 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1123 const Twine &Name = "") {
1124 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1125 }
1126
1127 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1128 ArrayRef<Value *> Args = None,
1129 const Twine &Name = "") {
1130 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1131 }
1132
1133 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1134 return Insert(CatchReturnInst::Create(CatchPad, BB));
1135 }
1136
1137 UnreachableInst *CreateUnreachable() {
1138 return Insert(new UnreachableInst(Context));
1139 }
1140
1141 //===--------------------------------------------------------------------===//
1142 // Instruction creation methods: Binary Operators
1143 //===--------------------------------------------------------------------===//
1144private:
1145 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1146 Value *LHS, Value *RHS,
1147 const Twine &Name,
1148 bool HasNUW, bool HasNSW) {
1149 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1150 if (HasNUW) BO->setHasNoUnsignedWrap();
1151 if (HasNSW) BO->setHasNoSignedWrap();
1152 return BO;
1153 }
1154
1155 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1156 FastMathFlags FMF) const {
1157 if (!FPMD)
1158 FPMD = DefaultFPMathTag;
1159 if (FPMD)
1160 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1161 I->setFastMathFlags(FMF);
1162 return I;
1163 }
1164
1165 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1166 Value *R, const Twine &Name) const {
1167 auto *LC = dyn_cast<Constant>(L);
1168 auto *RC = dyn_cast<Constant>(R);
1169 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1170 }
1171
1172 Value *getConstrainedFPRounding(Optional<fp::RoundingMode> Rounding) {
1173 fp::RoundingMode UseRounding = DefaultConstrainedRounding;
1174
1175 if (Rounding.hasValue())
1176 UseRounding = Rounding.getValue();
1177
1178 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1179    assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1180 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1181
1182 return MetadataAsValue::get(Context, RoundingMDS);
1183 }
1184
1185 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1186 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1187
1188 if (Except.hasValue())
1189 UseExcept = Except.getValue();
1190
1191 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1192    assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1193 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1194
1195 return MetadataAsValue::get(Context, ExceptMDS);
1196 }
1197
1198 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1199    assert(CmpInst::isFPPredicate(Predicate) &&
1200           Predicate != CmpInst::FCMP_FALSE &&
1201           Predicate != CmpInst::FCMP_TRUE &&
1202           "Invalid constrained FP comparison predicate!");
1203
1204 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1205 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1206
1207 return MetadataAsValue::get(Context, PredicateMDS);
1208 }
1209
1210public:
1211 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1212 bool HasNUW = false, bool HasNSW = false) {
1213 if (auto *LC = dyn_cast<Constant>(LHS))
1214 if (auto *RC = dyn_cast<Constant>(RHS))
1215 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1216 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1217 HasNUW, HasNSW);
1218 }
1219
1220 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1221 return CreateAdd(LHS, RHS, Name, false, true);
1222 }
1223
1224 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1225 return CreateAdd(LHS, RHS, Name, true, false);
1226 }
1227
1228 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1229 bool HasNUW = false, bool HasNSW = false) {
1230 if (auto *LC = dyn_cast<Constant>(LHS))
1231 if (auto *RC = dyn_cast<Constant>(RHS))
1232 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1233 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1234 HasNUW, HasNSW);
1235 }
1236
1237 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1238 return CreateSub(LHS, RHS, Name, false, true);
1239 }
1240
1241 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1242 return CreateSub(LHS, RHS, Name, true, false);
1243 }
1244
1245 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1246 bool HasNUW = false, bool HasNSW = false) {
1247 if (auto *LC = dyn_cast<Constant>(LHS))
1248 if (auto *RC = dyn_cast<Constant>(RHS))
1249 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1250 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1251 HasNUW, HasNSW);
1252 }
1253
1254 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1255 return CreateMul(LHS, RHS, Name, false, true);
1256 }
1257
1258 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1259 return CreateMul(LHS, RHS, Name, true, false);
1260 }
1261
1262 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1263 bool isExact = false) {
1264 if (auto *LC = dyn_cast<Constant>(LHS))
1265 if (auto *RC = dyn_cast<Constant>(RHS))
1266 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1267 if (!isExact)
1268 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1269 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1270 }
1271
1272 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1273 return CreateUDiv(LHS, RHS, Name, true);
1274 }
1275
1276 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1277 bool isExact = false) {
1278 if (auto *LC = dyn_cast<Constant>(LHS))
1279 if (auto *RC = dyn_cast<Constant>(RHS))
1280 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1281 if (!isExact)
1282 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1283 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1284 }
1285
1286 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1287 return CreateSDiv(LHS, RHS, Name, true);
1288 }
1289
1290 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1291 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1292 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1293 }
1294
1295 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1296 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1297 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1298 }
1299
1300 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1301 bool HasNUW = false, bool HasNSW = false) {
1302 if (auto *LC = dyn_cast<Constant>(LHS))
1303 if (auto *RC = dyn_cast<Constant>(RHS))
1304 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1305 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1306 HasNUW, HasNSW);
1307 }
1308
1309 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1310 bool HasNUW = false, bool HasNSW = false) {
1311 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1312 HasNUW, HasNSW);
1313 }
1314
1315 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1316 bool HasNUW = false, bool HasNSW = false) {
1317 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1318 HasNUW, HasNSW);
1319 }
1320
1321 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1322 bool isExact = false) {
1323 if (auto *LC = dyn_cast<Constant>(LHS))
1324 if (auto *RC = dyn_cast<Constant>(RHS))
1325 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1326 if (!isExact)
1327 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1328 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1329 }
1330
1331 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1332 bool isExact = false) {
1333 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1334 }
1335
1336 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1337 bool isExact = false) {
1338 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1339 }
1340
1341 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1342 bool isExact = false) {
1343 if (auto *LC = dyn_cast<Constant>(LHS))
1344 if (auto *RC = dyn_cast<Constant>(RHS))
1345 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1346 if (!isExact)
1347 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1348 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1349 }
1350
1351 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1352 bool isExact = false) {
1353 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1354 }
1355
1356 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1357 bool isExact = false) {
1358 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1359 }
1360
1361 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1362 if (auto *RC = dyn_cast<Constant>(RHS)) {
1363 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1364 return LHS; // LHS & -1 -> LHS
1365 if (auto *LC = dyn_cast<Constant>(LHS))
1366 return Insert(Folder.CreateAnd(LC, RC), Name);
1367 }
1368 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1369 }
1370
1371 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1372 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1373 }
1374
1375 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1376 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1377 }
1378
1379 Value *CreateAnd(ArrayRef<Value*> Ops) {
1380    assert(!Ops.empty());
1381 Value *Accum = Ops[0];
1382 for (unsigned i = 1; i < Ops.size(); i++)
1383 Accum = CreateAnd(Accum, Ops[i]);
1384 return Accum;
1385 }
1386
1387 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1388 if (auto *RC = dyn_cast<Constant>(RHS)) {
1389 if (RC->isNullValue())
1390 return LHS; // LHS | 0 -> LHS
1391 if (auto *LC = dyn_cast<Constant>(LHS))
1392 return Insert(Folder.CreateOr(LC, RC), Name);
1393 }
1394 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1395 }
1396
1397 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1398 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1399 }
1400
1401 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1402 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1403 }
1404
1405 Value *CreateOr(ArrayRef<Value*> Ops) {
1406    assert(!Ops.empty());
1407 Value *Accum = Ops[0];
1408 for (unsigned i = 1; i < Ops.size(); i++)
1409 Accum = CreateOr(Accum, Ops[i]);
1410 return Accum;
1411 }
1412
1413 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1414 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1415 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1416 }
1417
1418 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1419 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1420 }
1421
1422 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1423 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1424 }
1425
1426 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1427 MDNode *FPMD = nullptr) {
1428 if (IsFPConstrained)
1429 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1430 L, R, nullptr, Name, FPMD);
1431
1432 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1433 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1434 return Insert(I, Name);
1435 }
1436
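Illustrative aside (an assumption, not from the header): once the builder is put into constrained-FP mode, the ordinary floating-point Create* calls above emit the corresponding constrained intrinsics instead of plain FP instructions. A minimal sketch, with hypothetical operands A and B:

    // Sketch: with IsFPConstrained set, CreateFAdd produces a call to
    // llvm.experimental.constrained.fadd carrying rounding/exception metadata.
    Builder.setIsFPConstrained(true);
    llvm::Value *Sum = Builder.CreateFAdd(A, B, "sum");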
1437 /// Copy fast-math-flags from an instruction rather than using the builder's
1438 /// default FMF.
1439 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1440 const Twine &Name = "") {
1441 if (IsFPConstrained)
1442 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1443 L, R, FMFSource, Name);
1444
1445 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1446 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1447 FMFSource->getFastMathFlags());
1448 return Insert(I, Name);
1449 }
1450
1451 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1452 MDNode *FPMD = nullptr) {
1453 if (IsFPConstrained)
1454 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1455 L, R, nullptr, Name, FPMD);
1456
1457 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1458 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1459 return Insert(I, Name);
1460 }
1461
1462 /// Copy fast-math-flags from an instruction rather than using the builder's
1463 /// default FMF.
1464 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1465 const Twine &Name = "") {
1466 if (IsFPConstrained)
1467 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1468 L, R, FMFSource, Name);
1469
1470 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1471 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1472 FMFSource->getFastMathFlags());
1473 return Insert(I, Name);
1474 }
1475
1476 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1477 MDNode *FPMD = nullptr) {
1478 if (IsFPConstrained)
1479 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1480 L, R, nullptr, Name, FPMD);
1481
1482 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1483 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1484 return Insert(I, Name);
1485 }
1486
1487 /// Copy fast-math-flags from an instruction rather than using the builder's
1488 /// default FMF.
1489 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1490 const Twine &Name = "") {
1491 if (IsFPConstrained)
1492 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1493 L, R, FMFSource, Name);
1494
1495 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1496 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1497 FMFSource->getFastMathFlags());
1498 return Insert(I, Name);
1499 }
1500
1501 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1502 MDNode *FPMD = nullptr) {
1503 if (IsFPConstrained)
1504 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1505 L, R, nullptr, Name, FPMD);
1506
1507 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1508 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1509 return Insert(I, Name);
1510 }
1511
1512 /// Copy fast-math-flags from an instruction rather than using the builder's
1513 /// default FMF.
1514 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1515 const Twine &Name = "") {
1516 if (IsFPConstrained)
1517 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1518 L, R, FMFSource, Name);
1519
1520 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1521 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1522 FMFSource->getFastMathFlags());
1523 return Insert(I, Name);
1524 }
1525
1526 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1527 MDNode *FPMD = nullptr) {
1528 if (IsFPConstrained)
1529 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1530 L, R, nullptr, Name, FPMD);
1531
1532 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1533 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1534 return Insert(I, Name);
1535 }
1536
1537 /// Copy fast-math-flags from an instruction rather than using the builder's
1538 /// default FMF.
1539 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1540 const Twine &Name = "") {
1541 if (IsFPConstrained)
1542 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1543 L, R, FMFSource, Name);
1544
1545 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1546 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1547 FMFSource->getFastMathFlags());
1548 return Insert(I, Name);
1549 }
1550
1551 Value *CreateBinOp(Instruction::BinaryOps Opc,
1552 Value *LHS, Value *RHS, const Twine &Name = "",
1553 MDNode *FPMathTag = nullptr) {
1554 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1555 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1556 if (isa<FPMathOperator>(BinOp))
1557 setFPAttrs(BinOp, FPMathTag, FMF);
1558 return Insert(BinOp, Name);
1559 }
1560
1561 CallInst *CreateConstrainedFPBinOp(
1562 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1563 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1564 Optional<fp::RoundingMode> Rounding = None,
1565 Optional<fp::ExceptionBehavior> Except = None) {
1566 Value *RoundingV = getConstrainedFPRounding(Rounding);
1567 Value *ExceptV = getConstrainedFPExcept(Except);
1568
1569 FastMathFlags UseFMF = FMF;
1570 if (FMFSource)
1571 UseFMF = FMFSource->getFastMathFlags();
1572
1573 CallInst *C = CreateIntrinsic(ID, {L->getType()},
1574 {L, R, RoundingV, ExceptV}, nullptr, Name);
1575 setConstrainedFPCallAttr(C);
1576 setFPAttrs(C, FPMathTag, UseFMF);
1577 return C;
1578 }
1579
1580 Value *CreateNeg(Value *V, const Twine &Name = "",
1581 bool HasNUW = false, bool HasNSW = false) {
1582 if (auto *VC = dyn_cast<Constant>(V))
1583 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1584 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1585 if (HasNUW) BO->setHasNoUnsignedWrap();
1586 if (HasNSW) BO->setHasNoSignedWrap();
1587 return BO;
1588 }
1589
1590 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1591 return CreateNeg(V, Name, false, true);
1592 }
1593
1594 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1595 return CreateNeg(V, Name, true, false);
1596 }
1597
1598 Value *CreateFNeg(Value *V, const Twine &Name = "",
1599 MDNode *FPMathTag = nullptr) {
1600 if (auto *VC = dyn_cast<Constant>(V))
1601 return Insert(Folder.CreateFNeg(VC), Name);
1602 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1603 Name);
1604 }
1605
1606 /// Copy fast-math-flags from an instruction rather than using the builder's
1607 /// default FMF.
1608 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1609 const Twine &Name = "") {
1610 if (auto *VC = dyn_cast<Constant>(V))
1611 return Insert(Folder.CreateFNeg(VC), Name);
1612 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1613 FMFSource->getFastMathFlags()),
1614 Name);
1615 }
1616
1617 Value *CreateNot(Value *V, const Twine &Name = "") {
1618 if (auto *VC = dyn_cast<Constant>(V))
1619 return Insert(Folder.CreateNot(VC), Name);
1620 return Insert(BinaryOperator::CreateNot(V), Name);
1621 }
1622
1623 Value *CreateUnOp(Instruction::UnaryOps Opc,
1624 Value *V, const Twine &Name = "",
1625 MDNode *FPMathTag = nullptr) {
1626 if (auto *VC = dyn_cast<Constant>(V))
1627 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1628 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1629 if (isa<FPMathOperator>(UnOp))
1630 setFPAttrs(UnOp, FPMathTag, FMF);
1631 return Insert(UnOp, Name);
1632 }
1633
1634 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1635 /// The correct number of operands must be passed accordingly.
1636 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1637 const Twine &Name = "",
1638 MDNode *FPMathTag = nullptr) {
1639 if (Instruction::isBinaryOp(Opc)) {
1640      assert(Ops.size() == 2 && "Invalid number of operands!");
1641 return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
1642 Ops[0], Ops[1], Name, FPMathTag);
1643 }
1644 if (Instruction::isUnaryOp(Opc)) {
1645      assert(Ops.size() == 1 && "Invalid number of operands!");
1646 return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
1647 Ops[0], Name, FPMathTag);
1648 }
1649    llvm_unreachable("Unexpected opcode!");
1650 }
1651
1652 //===--------------------------------------------------------------------===//
1653 // Instruction creation methods: Memory Instructions
1654 //===--------------------------------------------------------------------===//
1655
1656 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1657 Value *ArraySize = nullptr, const Twine &Name = "") {
1658 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize), Name);
1659 }
1660
1661 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1662 const Twine &Name = "") {
1663 const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
1664 return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name);
1665 }
1666
1667 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1668 /// converting the string to 'bool' for the isVolatile parameter.
1669 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1670 return Insert(new LoadInst(Ty, Ptr), Name);
1671 }
1672
1673 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1674 return Insert(new LoadInst(Ty, Ptr), Name);
1675 }
1676
1677 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1678 const Twine &Name = "") {
1679 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile), Name);
1680 }
1681
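Illustrative aside (an assumption, not part of the header) on why the const char* overload above exists: with only the bool and Twine overloads, a string literal argument would convert to bool (a standard conversion beats Twine's user-defined conversion) and silently set isVolatile. A minimal sketch, with hypothetical Int32Ty and Ptr:

    // Sketch: the const char* overload makes the literal name the result of
    // the load rather than being interpreted as the isVolatile flag.
    llvm::LoadInst *L = Builder.CreateLoad(Int32Ty, Ptr, "val");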
1682 // Deprecated [opaque pointer types]
1683 LoadInst *CreateLoad(Value *Ptr, const char *Name) {
1684 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1685 }
1686
1687 // Deprecated [opaque pointer types]
1688 LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
1689 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1690 }
1691
1692 // Deprecated [opaque pointer types]
1693 LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
1694 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1695 Name);
1696 }
1697
1698 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1699 return Insert(new StoreInst(Val, Ptr, isVolatile));
1700 }
1701
1702 /// Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
1703 /// correctly, instead of converting the string to 'bool' for the isVolatile
1704 /// parameter.
1705 /// FIXME: Remove this function once transition to Align is over.
1706 /// Use the version that takes MaybeAlign instead of this one.
1707 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
1708 const char *Name) {
1709 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
1710 }
1711 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1712 const char *Name) {
1713 LoadInst *LI = CreateLoad(Ty, Ptr, Name);
1714 LI->setAlignment(Align);
1715 return LI;
1716 }
1717 /// FIXME: Remove this function once transition to Align is over.
1718 /// Use the version that takes MaybeAlign instead of this one.
1719 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
1720 const Twine &Name = "") {
1721 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
1722 }
1723 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1724 const Twine &Name = "") {
1725 LoadInst *LI = CreateLoad(Ty, Ptr, Name);
1726 LI->setAlignment(Align);
1727 return LI;
1728 }
1729 /// FIXME: Remove this function once transition to Align is over.
1730 /// Use the version that takes MaybeAlign instead of this one.
1731 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
1732 bool isVolatile, const Twine &Name = "") {
1733 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
1734 }
1735 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1736 bool isVolatile, const Twine &Name = "") {
1737 LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name);
1738 LI->setAlignment(Align);
1739 return LI;
1740 }
1741
1742 // Deprecated [opaque pointer types]
1743 LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
1744 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1745 Align, Name);
1746 }
1747 // Deprecated [opaque pointer types]
1748 LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
1749 const Twine &Name = "") {
1750 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1751 Align, Name);
1752 }
1753 // Deprecated [opaque pointer types]
1754 LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
1755 const Twine &Name = "") {
1756 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1757 Align, isVolatile, Name);
1758 }
1759 // Deprecated [opaque pointer types]
1760 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
1761 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1762 Align, Name);
1763 }
1764 // Deprecated [opaque pointer types]
1765 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align,
1766 const Twine &Name = "") {
1767 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1768 Align, Name);
1769 }
1770 // Deprecated [opaque pointer types]
1771 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, bool isVolatile,
1772 const Twine &Name = "") {
1773 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1774 Align, isVolatile, Name);
1775 }
1776
1777 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
1778 bool isVolatile = false) {
1779 StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
1780 SI->setAlignment(MaybeAlign(Align));
1781 return SI;
1782 }
1783 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1784 bool isVolatile = false) {
1785 return CreateAlignedStore(Val, Ptr, Align ? Align->value() : 0, isVolatile);
1786 }
1787 FenceInst *CreateFence(AtomicOrdering Ordering,
1788 SyncScope::ID SSID = SyncScope::System,
1789 const Twine &Name = "") {
1790 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1791 }
1792
1793 AtomicCmpXchgInst *
1794 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
1795 AtomicOrdering SuccessOrdering,
1796 AtomicOrdering FailureOrdering,
1797 SyncScope::ID SSID = SyncScope::System) {
1798 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
1799 FailureOrdering, SSID));
1800 }
1801
1802 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
1803 AtomicOrdering Ordering,
1804 SyncScope::ID SSID = SyncScope::System) {
1805 return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
1806 }
1807
1808 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1809 const Twine &Name = "") {
1810 return CreateGEP(nullptr, Ptr, IdxList, Name);
1811 }
1812
1813 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1814 const Twine &Name = "") {
1815 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1816 // Every index must be constant.
1817 size_t i, e;
1818 for (i = 0, e = IdxList.size(); i != e; ++i)
1819 if (!isa<Constant>(IdxList[i]))
1820 break;
1821 if (i == e)
1822 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1823 }
1824 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1825 }
1826
1827 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1828 const Twine &Name = "") {
1829 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1830 }
1831
1832 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1833 const Twine &Name = "") {
1834 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1835 // Every index must be constant.
1836 size_t i, e;
1837 for (i = 0, e = IdxList.size(); i != e; ++i)
1838 if (!isa<Constant>(IdxList[i]))
1839 break;
1840 if (i == e)
1841 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1842 Name);
1843 }
1844 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1845 }
1846
1847 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1848 return CreateGEP(nullptr, Ptr, Idx, Name);
1849 }
1850
1851 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1852    if (auto *PC = dyn_cast<Constant>(Ptr))
          [30] Assuming 'Ptr' is a 'Constant'
          [30.1] 'PC' is non-null
          [31] Taking true branch
1853      if (auto *IC = dyn_cast<Constant>(Idx))
          [32] Assuming 'Idx' is a 'Constant'
          [32.1] 'IC' is non-null
          [33] Taking true branch
1854        return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
          [34] Passing value via 1st parameter 'C'
          [35] Returning pointer
1855 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1856 }
1857
1858 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1859 const Twine &Name = "") {
1860 if (auto *PC = dyn_cast<Constant>(Ptr))
1861 if (auto *IC = dyn_cast<Constant>(Idx))
1862 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1863 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1864 }
1865
1866 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1867 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1868 }
1869
1870 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1871 const Twine &Name = "") {
1872 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1873
1874 if (auto *PC = dyn_cast<Constant>(Ptr))
1875 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1876
1877 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1878 }
1879
1880 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1881 const Twine &Name = "") {
1882 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1883
1884 if (auto *PC = dyn_cast<Constant>(Ptr))
1885 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1886
1887 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1888 }
1889
1890 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1891 const Twine &Name = "") {
1892 Value *Idxs[] = {
1893 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1894 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1895 };
1896
1897 if (auto *PC = dyn_cast<Constant>(Ptr))
1898 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1899
1900 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1901 }
1902
1903 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1904 unsigned Idx1, const Twine &Name = "") {
1905 Value *Idxs[] = {
1906 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1907 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1908 };
1909
1910 if (auto *PC = dyn_cast<Constant>(Ptr))
1911 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1912
1913 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1914 }
1915
1916 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1917 const Twine &Name = "") {
1918 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1919
1920 if (auto *PC = dyn_cast<Constant>(Ptr))
1921 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1922
1923 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1924 }
1925
1926 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1927 return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
1928 }
1929
1930 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1931 const Twine &Name = "") {
1932 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1933
1934 if (auto *PC = dyn_cast<Constant>(Ptr))
1935 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1936
1937 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1938 }
1939
1940 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1941 const Twine &Name = "") {
1942 return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
1943 }
1944
1945 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1946 const Twine &Name = "") {
1947 Value *Idxs[] = {
1948 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1949 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1950 };
1951
1952 if (auto *PC = dyn_cast<Constant>(Ptr))
1953 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1954
1955 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1956 }
1957
1958 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1959 const Twine &Name = "") {
1960 return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1961 }
1962
1963 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1964 uint64_t Idx1, const Twine &Name = "") {
1965 Value *Idxs[] = {
1966 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1967 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1968 };
1969
1970 if (auto *PC = dyn_cast<Constant>(Ptr))
1971 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1972
1973 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1974 }
1975
1976 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1977 const Twine &Name = "") {
1978 return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1979 }
1980
1981 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1982 const Twine &Name = "") {
1983 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1984 }
1985
1986 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
1987 return CreateConstInBoundsGEP2_32(nullptr, Ptr, 0, Idx, Name);
1988 }
1989
1990 /// Same as CreateGlobalString, but return a pointer with "i8*" type
1991 /// instead of a pointer to an array of i8.
1992 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
1993 unsigned AddressSpace = 0) {
1994 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace);
1995 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1996 Constant *Indices[] = {Zero, Zero};
1997 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
1998 Indices);
1999 }
2000
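For illustration (not from the header): a sketch of CreateGlobalStringPtr feeding a printf-style call, assuming a hypothetical FunctionCallee Printf already declared in the module.

    // Sketch: the returned constant is an i8* GEP into the private string
    // global, usable directly as a call argument.
    llvm::Constant *Fmt = Builder.CreateGlobalStringPtr("hello %d\n", "fmt");
    Builder.CreateCall(Printf, {Fmt, Builder.getInt32(42)});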
2001 //===--------------------------------------------------------------------===//
2002 // Instruction creation methods: Cast/Conversion Operators
2003 //===--------------------------------------------------------------------===//
2004
2005 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2006 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2007 }
2008
2009 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2010 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2011 }
2012
2013 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2014 return CreateCast(Instruction::SExt, V, DestTy, Name);
2015 }
2016
2017 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2018 /// the value untouched if the type of V is already DestTy.
2019 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2020 const Twine &Name = "") {
2021    assert(V->getType()->isIntOrIntVectorTy() &&
2022           DestTy->isIntOrIntVectorTy() &&
2023           "Can only zero extend/truncate integers!");
2024 Type *VTy = V->getType();
2025 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2026 return CreateZExt(V, DestTy, Name);
2027 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2028 return CreateTrunc(V, DestTy, Name);
2029 return V;
2030 }
2031
2032 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2033 /// the value untouched if the type of V is already DestTy.
2034 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2035 const Twine &Name = "") {
2036    assert(V->getType()->isIntOrIntVectorTy() &&
2037           DestTy->isIntOrIntVectorTy() &&
2038           "Can only sign extend/truncate integers!");
2039 Type *VTy = V->getType();
2040 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2041 return CreateSExt(V, DestTy, Name);
2042 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2043 return CreateTrunc(V, DestTy, Name);
2044 return V;
2045 }
2046
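Illustrative aside (an assumption, not from the header): these helpers choose the cast direction from the scalar bit widths and return the value unchanged when the types already match. A minimal sketch, with a hypothetical i16 value V16:

    // Sketch: widening uses zext/sext, narrowing uses trunc,
    // and an equal width returns the input value untouched.
    llvm::Value *AsI32 = Builder.CreateZExtOrTrunc(V16, Builder.getInt32Ty());
    llvm::Value *AsI8  = Builder.CreateSExtOrTrunc(V16, Builder.getInt8Ty());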
2047 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2048 if (IsFPConstrained)
2049 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2050 V, DestTy, nullptr, Name);
2051 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2052 }
2053
2054 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2055 if (IsFPConstrained)
2056 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2057 V, DestTy, nullptr, Name);
2058 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2059 }
2060
2061 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2062 if (IsFPConstrained)
2063 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2064 V, DestTy, nullptr, Name);
2065 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2066 }
2067
2068 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2069 if (IsFPConstrained)
2070 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2071 V, DestTy, nullptr, Name);
2072 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2073 }
2074
2075 Value *CreateFPTrunc(Value *V, Type *DestTy,
2076 const Twine &Name = "") {
2077 if (IsFPConstrained)
2078 return CreateConstrainedFPCast(
2079 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2080 Name);
2081 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2082 }
2083
2084 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2085 if (IsFPConstrained)
2086 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2087 V, DestTy, nullptr, Name);
2088 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2089 }
2090
2091 Value *CreatePtrToInt(Value *V, Type *DestTy,
2092 const Twine &Name = "") {
2093 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2094 }
2095
2096 Value *CreateIntToPtr(Value *V, Type *DestTy,
2097 const Twine &Name = "") {
2098 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2099 }
2100
2101 Value *CreateBitCast(Value *V, Type *DestTy,
2102 const Twine &Name = "") {
2103 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2104 }
2105
2106 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2107 const Twine &Name = "") {
2108 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2109 }
2110
2111 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2112 const Twine &Name = "") {
2113 if (V->getType() == DestTy)
2114 return V;
2115 if (auto *VC = dyn_cast<Constant>(V))
2116 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2117 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2118 }
2119
2120 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2121 const Twine &Name = "") {
2122 if (V->getType() == DestTy)
2123 return V;
2124 if (auto *VC = dyn_cast<Constant>(V))
2125 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2126 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2127 }
2128
2129 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2130 const Twine &Name = "") {
2131 if (V->getType() == DestTy)
2132 return V;
2133 if (auto *VC = dyn_cast<Constant>(V))
2134 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2135 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2136 }
2137
2138 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2139 const Twine &Name = "") {
2140 if (V->getType() == DestTy)
2141 return V;
2142 if (auto *VC = dyn_cast<Constant>(V))
2143 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2144 return Insert(CastInst::Create(Op, V, DestTy), Name);
2145 }
2146
2147 Value *CreatePointerCast(Value *V, Type *DestTy,
2148 const Twine &Name = "") {
2149 if (V->getType() == DestTy)
2150 return V;
2151 if (auto *VC = dyn_cast<Constant>(V))
2152 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2153 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2154 }
2155
2156 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2157 const Twine &Name = "") {
2158 if (V->getType() == DestTy)
2159 return V;
2160
2161 if (auto *VC = dyn_cast<Constant>(V)) {
2162 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2163 Name);
2164 }
2165
2166 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2167 Name);
2168 }
2169
2170 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2171 const Twine &Name = "") {
2172 if (V->getType() == DestTy)
2173 return V;
2174 if (auto *VC = dyn_cast<Constant>(V))
2175 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2176 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2177 }
2178
2179 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2180 const Twine &Name = "") {
2181 if (V->getType() == DestTy)
2182 return V;
2183 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2184 return CreatePtrToInt(V, DestTy, Name);
2185 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2186 return CreateIntToPtr(V, DestTy, Name);
2187
2188 return CreateBitCast(V, DestTy, Name);
2189 }
2190
2191 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2192 if (V->getType() == DestTy)
2193 return V;
2194 if (auto *VC = dyn_cast<Constant>(V))
2195 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2196 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2197 }
2198
2199 CallInst *CreateConstrainedFPCast(
2200 Intrinsic::ID ID, Value *V, Type *DestTy,
2201 Instruction *FMFSource = nullptr, const Twine &Name = "",
2202 MDNode *FPMathTag = nullptr,
2203 Optional<fp::RoundingMode> Rounding = None,
2204 Optional<fp::ExceptionBehavior> Except = None) {
2205 Value *ExceptV = getConstrainedFPExcept(Except);
2206
2207 FastMathFlags UseFMF = FMF;
2208 if (FMFSource)
2209 UseFMF = FMFSource->getFastMathFlags();
2210
2211 CallInst *C;
2212 bool HasRoundingMD = false;
2213 switch (ID) {
2214 default:
2215 break;
2216#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
2217 case Intrinsic::INTRINSIC: \
2218 HasRoundingMD = ROUND_MODE; \
2219 break;
2220#include "llvm/IR/ConstrainedOps.def"
2221 }
2222 if (HasRoundingMD) {
2223 Value *RoundingV = getConstrainedFPRounding(Rounding);
2224 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
2225 nullptr, Name);
2226 } else
2227 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
2228 Name);
2229
2230 setConstrainedFPCallAttr(C);
2231
2232 if (isa<FPMathOperator>(C))
2233 setFPAttrs(C, FPMathTag, UseFMF);
2234 return C;
2235 }
2236
2237 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2238 // compile-time error instead of converting the string to bool for the
2239 // isSigned parameter.
2240 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2241
2242 //===--------------------------------------------------------------------===//
2243 // Instruction creation methods: Compare Instructions
2244 //===--------------------------------------------------------------------===//
2245
2246 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2247 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2248 }
2249
2250 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2251 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2252 }
2253
2254 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2255 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2256 }
2257
2258 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2259 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2260 }
2261
2262 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2263 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2264 }
2265
2266 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2267 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2268 }
2269
2270 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2271 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2272 }
2273
2274 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2275 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2276 }
2277
2278 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2279 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2280 }
2281
2282 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2283 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2284 }
2285
2286 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2287 MDNode *FPMathTag = nullptr) {
2288 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2289 }
2290
2291 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2292 MDNode *FPMathTag = nullptr) {
2293 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2294 }
2295
2296 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2297 MDNode *FPMathTag = nullptr) {
2298 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2299 }
2300
2301 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2302 MDNode *FPMathTag = nullptr) {
2303 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2304 }
2305
2306 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2307 MDNode *FPMathTag = nullptr) {
2308 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2309 }
2310
2311 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2312 MDNode *FPMathTag = nullptr) {
2313 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2314 }
2315
2316 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2317 MDNode *FPMathTag = nullptr) {
2318 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2319 }
2320
2321 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2322 MDNode *FPMathTag = nullptr) {
2323 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2324 }
2325
2326 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2327 MDNode *FPMathTag = nullptr) {
2328 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2329 }
2330
2331 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2332 MDNode *FPMathTag = nullptr) {
2333 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2334 }
2335
2336 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2337 MDNode *FPMathTag = nullptr) {
2338 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2339 }
2340
2341 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2342 MDNode *FPMathTag = nullptr) {
2343 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2344 }
2345
2346 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2347 MDNode *FPMathTag = nullptr) {
2348 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2349 }
2350
2351 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2352 MDNode *FPMathTag = nullptr) {
2353 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2354 }
2355
2356 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2357 const Twine &Name = "") {
2358 if (auto *LC = dyn_cast<Constant>(LHS))
2359 if (auto *RC = dyn_cast<Constant>(RHS))
2360 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2361 return Insert(new ICmpInst(P, LHS, RHS), Name);
2362 }
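Illustrative sketch (Builder, A and B are assumed names): when both operands are Constants the comparison is folded by the ConstantFolder and no instruction is inserted; otherwise an icmp instruction is created at the insertion point.

  // Hypothetical sketch; Builder is an IRBuilder<> at some insertion point.
  Value *Folded = Builder.CreateICmpEQ(Builder.getInt32(4), Builder.getInt32(4));
  // Folded is the constant i1 true; nothing was inserted into the block.
  Value *Cmp = Builder.CreateICmpULT(A, B, "cmp");
  // Cmp is an 'icmp ult' instruction when A and B are not both constants.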
2363
2364 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2365 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2366 if (IsFPConstrained)
2367 return CreateConstrainedFPCmp(Intrinsic::experimental_constrained_fcmp,
2368 P, LHS, RHS, Name);
2369
2370 if (auto *LC = dyn_cast<Constant>(LHS))
2371 if (auto *RC = dyn_cast<Constant>(RHS))
2372 return Insert(Folder.CreateFCmp(P, LC, RC), Name);
2373 return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
2374 }
2375
2376 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2377 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2378 if (IsFPConstrained)
2379 return CreateConstrainedFPCmp(Intrinsic::experimental_constrained_fcmps,
2380 P, LHS, RHS, Name);
2381
2382 if (auto *LC = dyn_cast<Constant>(LHS))
2383 if (auto *RC = dyn_cast<Constant>(RHS))
2384 return Insert(Folder.CreateFCmp(P, LC, RC), Name);
2385 return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
2386 }
2387
2388 CallInst *CreateConstrainedFPCmp(
2389 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2390 const Twine &Name = "",
2391 Optional<fp::ExceptionBehavior> Except = None) {
2392 Value *PredicateV = getConstrainedFPPredicate(P);
2393 Value *ExceptV = getConstrainedFPExcept(Except);
2394
2395 CallInst *C = CreateIntrinsic(ID, {L->getType()},
2396 {L, R, PredicateV, ExceptV}, nullptr, Name);
2397 setConstrainedFPCallAttr(C);
2398 return C;
2399 }
2400
2401 //===--------------------------------------------------------------------===//
2402 // Instruction creation methods: Other Instructions
2403 //===--------------------------------------------------------------------===//
2404
2405 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2406 const Twine &Name = "") {
2407 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2408 if (isa<FPMathOperator>(Phi))
2409 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2410 return Insert(Phi, Name);
2411 }
2412
2413 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2414 ArrayRef<Value *> Args = None, const Twine &Name = "",
2415 MDNode *FPMathTag = nullptr) {
2416 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2417 if (IsFPConstrained)
2418 setConstrainedFPCallAttr(CI);
2419 if (isa<FPMathOperator>(CI))
2420 setFPAttrs(CI, FPMathTag, FMF);
2421 return Insert(CI, Name);
2422 }
2423
2424 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2425 ArrayRef<OperandBundleDef> OpBundles,
2426 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2427 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2428 if (IsFPConstrained)
2429 setConstrainedFPCallAttr(CI);
2430 if (isa<FPMathOperator>(CI))
2431 setFPAttrs(CI, FPMathTag, FMF);
2432 return Insert(CI, Name);
2433 }
2434
2435 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2436 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2437 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2438 FPMathTag);
2439 }
2440
2441 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2442 ArrayRef<OperandBundleDef> OpBundles,
2443 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2444 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2445 OpBundles, Name, FPMathTag);
2446 }
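A short sketch of the FunctionCallee-based overload (hypothetical names: Module M, IRBuilder<> Builder, double-typed Value *X). getOrInsertFunction hands back both the callee and its FunctionType, so the call site does not need the deprecated pointer-element-type lookup below:

  // Hypothetical sketch; M, Builder and X are assumed to exist.
  FunctionCallee Log2 = M.getOrInsertFunction(
      "log2", Builder.getDoubleTy(), Builder.getDoubleTy());
  CallInst *Call = Builder.CreateCall(Log2, {X}, "log2.x");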
2447
2448 // Deprecated [opaque pointer types]
2449 CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
2450 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2451 return CreateCall(
2452 cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
2453 Args, Name, FPMathTag);
2454 }
2455
2456 // Deprecated [opaque pointer types]
2457 CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
2458 ArrayRef<OperandBundleDef> OpBundles,
2459 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2460 return CreateCall(
2461 cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
2462 Args, OpBundles, Name, FPMathTag);
2463 }
2464
2465 CallInst *CreateConstrainedFPCall(
2466 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2467 Optional<fp::RoundingMode> Rounding = None,
2468 Optional<fp::ExceptionBehavior> Except = None) {
2469 llvm::SmallVector<Value *, 6> UseArgs;
2470
2471 for (auto *OneArg : Args)
2472 UseArgs.push_back(OneArg);
2473 bool HasRoundingMD = false;
2474 switch (Callee->getIntrinsicID()) {
2475 default:
2476 break;
2477#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
2478 case Intrinsic::INTRINSIC: \
2479 HasRoundingMD = ROUND_MODE; \
2480 break;
2481#include "llvm/IR/ConstrainedOps.def"
2482 }
2483 if (HasRoundingMD)
2484 UseArgs.push_back(getConstrainedFPRounding(Rounding));
2485 UseArgs.push_back(getConstrainedFPExcept(Except));
2486
2487 CallInst *C = CreateCall(Callee, UseArgs, Name);
2488 setConstrainedFPCallAttr(C);
2489 return C;
2490 }
2491
2492 Value *CreateSelect(Value *C, Value *True, Value *False,
2493 const Twine &Name = "", Instruction *MDFrom = nullptr) {
2494 if (auto *CC = dyn_cast<Constant>(C))
2495 if (auto *TC = dyn_cast<Constant>(True))
2496 if (auto *FC = dyn_cast<Constant>(False))
2497 return Insert(Folder.CreateSelect(CC, TC, FC), Name);
2498
2499 SelectInst *Sel = SelectInst::Create(C, True, False);
2500 if (MDFrom) {
2501 MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
2502 MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
2503 Sel = addBranchMetadata(Sel, Prof, Unpred);
2504 }
2505 if (isa<FPMathOperator>(Sel))
2506 setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
2507 return Insert(Sel, Name);
2508 }
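A hedged usage sketch (hypothetical names: Cond, A, B and an existing BranchInst *Br). Passing Br as MDFrom copies its !prof and !unpredictable metadata onto the new select, so profile hints survive when a branch is turned into a select:

  // Hypothetical sketch; Builder, Cond, A, B and Br are assumed to exist.
  Value *Sel = Builder.CreateSelect(Cond, A, B, "sel", /*MDFrom=*/Br);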
2509
2510 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2511 return Insert(new VAArgInst(List, Ty), Name);
2512 }
2513
2514 Value *CreateExtractElement(Value *Vec, Value *Idx,
2515 const Twine &Name = "") {
2516 if (auto *VC = dyn_cast<Constant>(Vec))
2517 if (auto *IC = dyn_cast<Constant>(Idx))
2518 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2519 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2520 }
2521
2522 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2523 const Twine &Name = "") {
2524 return CreateExtractElement(Vec, getInt64(Idx), Name);
2525 }
2526
2527 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2528 const Twine &Name = "") {
2529 if (auto *VC = dyn_cast<Constant>(Vec))
2530 if (auto *NC = dyn_cast<Constant>(NewElt))
2531 if (auto *IC = dyn_cast<Constant>(Idx))
2532 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2533 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2534 }
2535
2536 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2537 const Twine &Name = "") {
2538 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2539 }
2540
2541 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2542 const Twine &Name = "") {
2543 if (auto *V1C = dyn_cast<Constant>(V1))
2544 if (auto *V2C = dyn_cast<Constant>(V2))
2545 if (auto *MC = dyn_cast<Constant>(Mask))
2546 return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name);
2547 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2548 }
2549
2550 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<uint32_t> IntMask,
2551 const Twine &Name = "") {
2552 Value *Mask = ConstantDataVector::get(Context, IntMask);
2553 return CreateShuffleVector(V1, V2, Mask, Name);
2554 }
2555
2556 Value *CreateExtractValue(Value *Agg,
2557 ArrayRef<unsigned> Idxs,
2558 const Twine &Name = "") {
2559 if (auto *AggC = dyn_cast<Constant>(Agg))
2560 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2561 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2562 }
2563
2564 Value *CreateInsertValue(Value *Agg, Value *Val,
2565 ArrayRef<unsigned> Idxs,
2566 const Twine &Name = "") {
2567 if (auto *AggC = dyn_cast<Constant>(Agg))
2568 if (auto *ValC = dyn_cast<Constant>(Val))
2569 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2570 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2571 }
2572
2573 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2574 const Twine &Name = "") {
2575 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2576 }
2577
2578 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2579 return Insert(new FreezeInst(V), Name);
2580 }
2581
2582 //===--------------------------------------------------------------------===//
2583 // Utility creation methods
2584 //===--------------------------------------------------------------------===//
2585
2586 /// Return an i1 value testing if \p Arg is null.
2587 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2588 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2589 Name);
2590 }
2591
2592 /// Return an i1 value testing if \p Arg is not null.
2593 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2594 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2595 Name);
2596 }
2597
2598 /// Return the i64 difference between two pointer values, dividing out
2599 /// the size of the pointed-to objects.
2600 ///
2601 /// This is intended to implement C-style pointer subtraction. As such, the
2602 /// pointers must be appropriately aligned for their element types and
2603 /// pointing into the same object.
2604 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "") {
2605 assert(LHS->getType() == RHS->getType() &&
2606        "Pointer subtraction operand types must match!");
2607 auto *ArgType = cast<PointerType>(LHS->getType());
2608 Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
2609 Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
2610 Value *Difference = CreateSub(LHS_int, RHS_int);
2611 return CreateExactSDiv(Difference,
2612 ConstantExpr::getSizeOf(ArgType->getElementType()),
2613 Name);
2614 }
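A usage sketch under assumed names (P and Q are i32* values pointing into the same array). The divisor is a sizeof constant expression over the element type, which folds to the element size in bytes:

  // Hypothetical sketch; the call expands to roughly:
  //   %lhs = ptrtoint i32* %P to i64
  //   %rhs = ptrtoint i32* %Q to i64
  //   %sub = sub i64 %lhs, %rhs
  //   %dif = sdiv exact i64 %sub, 4    ; sizeof(i32), from a sizeof constexpr
  Value *NumElems = Builder.CreatePtrDiff(P, Q, "elem.diff");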
2615
2616 /// Create a launder.invariant.group intrinsic call. If Ptr's type is not
2617 /// i8*, it is cast to i8* in the same address space before the call and
2618 /// cast back to its original type after the call.
2619 Value *CreateLaunderInvariantGroup(Value *Ptr) {
2620 assert(isa<PointerType>(Ptr->getType()) &&
2621        "launder.invariant.group only applies to pointers.");
2622 // FIXME: we could potentially avoid casts to/from i8*.
2623 auto *PtrType = Ptr->getType();
2624 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
2625 if (PtrType != Int8PtrTy)
2626 Ptr = CreateBitCast(Ptr, Int8PtrTy);
2627 Module *M = BB->getParent()->getParent();
2628 Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
2629 M, Intrinsic::launder_invariant_group, {Int8PtrTy});
2630
2631 assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
2632        FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
2633            Int8PtrTy &&
2634        "LaunderInvariantGroup should take and return the same type");
2635
2636 CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
2637
2638 if (PtrType != Int8PtrTy)
2639 return CreateBitCast(Fn, PtrType);
2640 return Fn;
2641 }
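A brief sketch assuming a hypothetical %struct.S* value named Obj. Because Obj is not i8*, the builder inserts a bitcast to i8*, calls llvm.launder.invariant.group.p0i8, and bitcasts the result back to %struct.S*:

  // Hypothetical sketch; Builder and Obj are assumed to exist.
  Value *Laundered = Builder.CreateLaunderInvariantGroup(Obj);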
2642
2643 /// Create a strip.invariant.group intrinsic call. If Ptr's type is not
2644 /// i8*, it is cast to i8* in the same address space before the call and
2645 /// cast back to its original type after the call.
2646 Value *CreateStripInvariantGroup(Value *Ptr) {
2647 assert(isa<PointerType>(Ptr->getType()) &&
2648        "strip.invariant.group only applies to pointers.");
2649
2650 // FIXME: we could potentially avoid casts to/from i8*.
2651 auto *PtrType = Ptr->getType();
2652 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
2653 if (PtrType != Int8PtrTy)
2654 Ptr = CreateBitCast(Ptr, Int8PtrTy);
2655 Module *M = BB->getParent()->getParent();
2656 Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
2657 M, Intrinsic::strip_invariant_group, {Int8PtrTy});
2658
2659 assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
2660        FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
2661            Int8PtrTy &&
2662        "StripInvariantGroup should take and return the same type");
2663
2664 CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
2665
2666 if (PtrType != Int8PtrTy)
2667 return CreateBitCast(Fn, PtrType);
2668 return Fn;
2669 }
2670
2671 /// Return a vector value that contains \p V broadcast to \p NumElts
2672 /// elements.
2673 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") {
2674 assert(NumElts > 0 && "Cannot splat to an empty vector!");
2675
2676 // First insert it into an undef vector so we can shuffle it.
2677 Type *I32Ty = getInt32Ty();
2678 Value *Undef = UndefValue::get(VectorType::get(V->getType(), NumElts));
2679 V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
2680 Name + ".splatinsert");
2681
2682 // Shuffle the value across the desired number of elements.
2683 Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32Ty, NumElts));
2684 return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
2685 }
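A usage sketch with an assumed float value X. The splat is assembled exactly as the method describes: an insertelement into an undef vector followed by a zero-mask shufflevector:

  // Hypothetical sketch; emits roughly:
  //   %v.splatinsert = insertelement <4 x float> undef, float %X, i32 0
  //   %v.splat = shufflevector <4 x float> %v.splatinsert,
  //                            <4 x float> undef, <4 x i32> zeroinitializer
  Value *Splat = Builder.CreateVectorSplat(4, X, "v");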
2686
2687 /// Return a value that has been extracted from a larger integer type.
2688 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2689 IntegerType *ExtractedTy, uint64_t Offset,
2690 const Twine &Name) {
2691 auto *IntTy = cast<IntegerType>(From->getType());
2692 assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
2693            DL.getTypeStoreSize(IntTy) &&
2694        "Element extends past full value");
2695 uint64_t ShAmt = 8 * Offset;
2696 Value *V = From;
2697 if (DL.isBigEndian())
2698 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
2699 DL.getTypeStoreSize(ExtractedTy) - Offset);
2700 if (ShAmt) {
2701 V = CreateLShr(V, ShAmt, Name + ".shift");
2702 }
2703 assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
2704        "Cannot extract to a larger integer!");
2705 if (ExtractedTy != IntTy) {
2706 V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
2707 }
2708 return V;
2709 }
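A sketch under assumed names (From is an i32 value, DL is the module's DataLayout). On a little-endian target, extracting the i8 at byte offset 2 shifts right by 16 bits and truncates; on a big-endian target the shift amount is computed from the opposite end:

  // Hypothetical sketch; Builder, From and DL are assumed to exist.
  Value *Byte2 = Builder.CreateExtractInteger(
      DL, From, Builder.getInt8Ty(), /*Offset=*/2, "byte2");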
2710
2711 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2712 unsigned Dimension, unsigned LastIndex,
2713 MDNode *DbgInfo) {
2714 assert(isa<PointerType>(Base->getType()) &&
2715        "Invalid Base ptr type for preserve.array.access.index.");
2716 auto *BaseType = Base->getType();
2717
2718 Value *LastIndexV = getInt32(LastIndex);
2719 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2720 SmallVector<Value *, 4> IdxList;
2721 for (unsigned I = 0; I < Dimension; ++I)
2722 IdxList.push_back(Zero);
2723 IdxList.push_back(LastIndexV);
2724
2725 Type *ResultType =
2726 GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
2727
2728 Module *M = BB->getParent()->getParent();
2729 Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
2730 M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
2731
2732 Value *DimV = getInt32(Dimension);
2733 CallInst *Fn =
2734 CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
2735 if (DbgInfo)
2736 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
2737
2738 return Fn;
2739 }
2740
2741 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2742 MDNode *DbgInfo) {
2743 assert(isa<PointerType>(Base->getType()) &&
2744        "Invalid Base ptr type for preserve.union.access.index.");
2745 auto *BaseType = Base->getType();
2746
2747 Module *M = BB->getParent()->getParent();
2748 Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
2749 M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
2750
2751 Value *DIIndex = getInt32(FieldIndex);
2752 CallInst *Fn =
2753 CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
2754 if (DbgInfo)
2755 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
2756
2757 return Fn;
2758 }
2759
2760 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2761 unsigned Index, unsigned FieldIndex,
2762 MDNode *DbgInfo) {
2763 assert(isa<PointerType>(Base->getType()) &&
2764        "Invalid Base ptr type for preserve.struct.access.index.");
2765 auto *BaseType = Base->getType();
2766
2767 Value *GEPIndex = getInt32(Index);
2768 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2769 Type *ResultType =
2770 GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
2771
2772 Module *M = BB->getParent()->getParent();
2773 Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
2774 M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
2775
2776 Value *DIIndex = getInt32(FieldIndex);
2777 CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
2778 {Base, GEPIndex, DIIndex});
2779 if (DbgInfo)
2780 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
2781
2782 return Fn;
2783 }
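A hedged sketch with assumed names (StructTy is a StructType* and Rec a pointer to it). The intrinsic call stands in for 'getelementptr %StructTy, %Rec, 0, 1' while keeping the field index relocatable, which is what BPF CO-RE relies on:

  // Hypothetical sketch; Builder, StructTy and Rec are assumed to exist.
  Value *FieldAddr = Builder.CreatePreserveStructAccessIndex(
      StructTy, Rec, /*Index=*/1, /*FieldIndex=*/1, /*DbgInfo=*/nullptr);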
2784
2785private:
2786 /// Helper function that creates an assume intrinsic call representing an
2787 /// alignment assumption on the provided Ptr, Mask, Type and Offset. Since
2788 /// it can sometimes be useful to perform additional logic based on this
2789 /// alignment check, the check itself can be returned through 'TheCheck'.
2790 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2791 Value *PtrValue, Value *Mask,
2792 Type *IntPtrTy, Value *OffsetValue,
2793 Value **TheCheck) {
2794 Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2795
2796 if (OffsetValue) {
2797 bool IsOffsetZero = false;
2798 if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
2799 IsOffsetZero = CI->isZero();
2800
2801 if (!IsOffsetZero) {
2802 if (OffsetValue->getType() != IntPtrTy)
2803 OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
2804 "offsetcast");
2805 PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2806 }
2807 }
2808
2809 Value *Zero = ConstantInt::get(IntPtrTy, 0);
2810 Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
2811 Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2812 if (TheCheck)
2813 *TheCheck = InvCond;
2814
2815 return CreateAssumption(InvCond);
2816 }
2817
2818public:
2819 /// Create an assume intrinsic call that represents an alignment
2820 /// assumption on the provided pointer.
2821 ///
2822 /// An optional offset can be provided, and if it is provided, the offset
2823 /// must be subtracted from the provided pointer to get the pointer with the
2824 /// specified alignment.
2825 ///
2826 /// Since it can sometimes be useful to perform additional logic based on
2827 /// this alignment check, the check itself can be returned through 'TheCheck'.
2828 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2829 unsigned Alignment,
2830 Value *OffsetValue = nullptr,
2831 Value **TheCheck = nullptr) {
2832 assert(isa<PointerType>(PtrValue->getType()) &&
2833        "trying to create an alignment assumption on a non-pointer?");
2834 assert(Alignment != 0 && "Invalid Alignment");
2835 auto *PtrTy = cast<PointerType>(PtrValue->getType());
2836 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
2837
2838 Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
2839 return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
2840 OffsetValue, TheCheck);
2841 }
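A usage sketch with assumed names (Ptr is a pointer value, DL the module DataLayout). Asking for 16-byte alignment masks the low four bits and feeds the comparison into llvm.assume; the comparison itself is returned through TheCheck for any further custom logic:

  // Hypothetical sketch; emits roughly:
  //   %ptrint    = ptrtoint i8* %Ptr to i64
  //   %maskedptr = and i64 %ptrint, 15
  //   %maskcond  = icmp eq i64 %maskedptr, 0
  //   call void @llvm.assume(i1 %maskcond)
  Value *AlignCheck = nullptr;
  Builder.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16,
                                    /*OffsetValue=*/nullptr, &AlignCheck);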
2842
2843 /// Create an assume intrinsic call that represents an alignment
2844 /// assumption on the provided pointer.
2845 ///
2846 /// An optional offset can be provided, and if it is provided, the offset
2847 /// must be subtracted from the provided pointer to get the pointer with the
2848 /// specified alignment.
2849 ///
2850 /// Since it can sometimes be useful to perform additional logic based on
2851 /// this alignment check, the check itself can be returned through 'TheCheck'.
2852 ///
2853 /// This overload handles the condition where the Alignment is dependent
2854 /// on an existing value rather than a static value.
2855 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2856 Value *Alignment,
2857 Value *OffsetValue = nullptr,
2858 Value **TheCheck = nullptr) {
2859 assert(isa<PointerType>(PtrValue->getType()) &&
2860        "trying to create an alignment assumption on a non-pointer?");
2861 auto *PtrTy = cast<PointerType>(PtrValue->getType());
2862 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
2863
2864 if (Alignment->getType() != IntPtrTy)
2865 Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
2866 "alignmentcast");
2867
2868 Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");
2869
2870 return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
2871 OffsetValue, TheCheck);
2872 }
2873};
2874
2875// Create wrappers for C Binding types (see CBindingWrapping.h).
2876 DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
     // Macro expansion:
     inline IRBuilder<> *unwrap(LLVMBuilderRef P) {
       return reinterpret_cast<IRBuilder<>*>(P);
     }
     inline LLVMBuilderRef wrap(const IRBuilder<> *P) {
       return reinterpret_cast<LLVMBuilderRef>(const_cast<IRBuilder<>*>(P));
     }
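A hedged sketch of the C-API bridging these wrappers provide: a handle created through the C bindings can be viewed as the C++ builder and converted back. LLVMCreateBuilder and LLVMDisposeBuilder are C API entry points; the remaining names are illustrative only:

  // Hypothetical sketch; the builder still needs SetInsertPoint(...) before
  // any IR is emitted through it.
  LLVMBuilderRef CRef = LLVMCreateBuilder();   // C API handle (global context)
  IRBuilder<> *Builder = unwrap(CRef);         // same object, C++ view
  LLVMBuilderRef Again = wrap(Builder);        // Again == CRef
  LLVMDisposeBuilder(CRef);                    // the C API owns the allocation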
2877
2878} // end namespace llvm
2879
2880#endif // LLVM_IR_IRBUILDER_H