Bug Summary

File: lib/Target/X86/X86InterleavedAccess.cpp
Warning: line 535, column 14
2nd function call argument is an uninitialized value

Annotated Source Code

[?] Use j/k keys for keyboard navigation

/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp

1//===- X86InterleavedAccess.cpp -------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// This file contains the X86 implementation of the interleaved accesses
12/// optimization generating X86-specific instructions/intrinsics for
13/// interleaved access groups.
14//
15//===----------------------------------------------------------------------===//
16
17#include "X86ISelLowering.h"
18#include "X86Subtarget.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/Analysis/VectorUtils.h"
22#include "llvm/CodeGen/MachineValueType.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
25#include "llvm/IR/DerivedTypes.h"
26#include "llvm/IR/IRBuilder.h"
27#include "llvm/IR/Instruction.h"
28#include "llvm/IR/Instructions.h"
29#include "llvm/IR/Module.h"
30#include "llvm/IR/Type.h"
31#include "llvm/IR/Value.h"
32#include "llvm/Support/Casting.h"
33#include <algorithm>
34#include <cassert>
35#include <cmath>
36#include <cstdint>
37
38using namespace llvm;
39
40namespace {
41
42/// \brief This class holds necessary information to represent an interleaved
43/// access group and supports utilities to lower the group into
44/// X86-specific instructions/intrinsics.
45/// E.g. A group of interleaving access loads (Factor = 2; accessing every
46/// other element)
47/// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
48/// %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
49/// %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
50class X86InterleavedAccessGroup {
51 /// \brief Reference to the wide-load instruction of an interleaved access
52 /// group.
53 Instruction *const Inst;
54
55 /// \brief Reference to the shuffle(s), consumer(s) of the (load) 'Inst'.
56 ArrayRef<ShuffleVectorInst *> Shuffles;
57
58 /// \brief Reference to the starting index of each user-shuffle.
59 ArrayRef<unsigned> Indices;
60
61 /// \brief Reference to the interleaving stride in terms of elements.
62 const unsigned Factor;
63
64 /// \brief Reference to the underlying target.
65 const X86Subtarget &Subtarget;
66
67 const DataLayout &DL;
68
69 IRBuilder<> &Builder;
70
71 /// \brief Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors
72 /// sub vectors of type \p T. Returns the sub-vectors in \p DecomposedVectors.
73 void decompose(Instruction *Inst, unsigned NumSubVectors, VectorType *T,
74 SmallVectorImpl<Instruction *> &DecomposedVectors);
75
76 /// \brief Performs matrix transposition on a 4x4 matrix \p InputVectors and
77 /// returns the transposed-vectors in \p TransposedVectors.
78 /// E.g.
79 /// InputVectors:
80 /// In-V0 = p1, p2, p3, p4
81 /// In-V1 = q1, q2, q3, q4
82 /// In-V2 = r1, r2, r3, r4
83 /// In-V3 = s1, s2, s3, s4
84 /// OutputVectors:
85 /// Out-V0 = p1, q1, r1, s1
86 /// Out-V1 = p2, q2, r2, s2
87 /// Out-V2 = p3, q3, r3, s3
88 /// Out-V3 = P4, q4, r4, s4
89 void transpose_4x4(ArrayRef<Instruction *> InputVectors,
90 SmallVectorImpl<Value *> &TransposedMatrix);
91 void interleave8bitStride4(ArrayRef<Instruction *> InputVectors,
92 SmallVectorImpl<Value *> &TransposedMatrix,
93 unsigned NumSubVecElems);
94 void interleave8bitStride4VF8(ArrayRef<Instruction *> InputVectors,
95 SmallVectorImpl<Value *> &TransposedMatrix);
96 void interleave8bitStride3(ArrayRef<Instruction *> InputVectors,
97 SmallVectorImpl<Value *> &TransposedMatrix,
98 unsigned NumSubVecElems);
99 void deinterleave8bitStride3(ArrayRef<Instruction *> InputVectors,
100 SmallVectorImpl<Value *> &TransposedMatrix,
101 unsigned NumSubVecElems);
102
103public:
104 /// In order to form an interleaved access group X86InterleavedAccessGroup
105 /// requires a wide-load instruction \p 'I', a group of interleaved-vectors
106 /// \p Shuffs, reference to the first indices of each interleaved-vector
107 /// \p 'Ind' and the interleaving stride factor \p F. In order to generate
108 /// X86-specific instructions/intrinsics it also requires the underlying
109 /// target information \p STarget.
110 explicit X86InterleavedAccessGroup(Instruction *I,
111 ArrayRef<ShuffleVectorInst *> Shuffs,
112 ArrayRef<unsigned> Ind, const unsigned F,
113 const X86Subtarget &STarget,
114 IRBuilder<> &B)
115 : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
116 DL(Inst->getModule()->getDataLayout()), Builder(B) {}
117
118 /// \brief Returns true if this interleaved access group can be lowered into
119 /// x86-specific instructions/intrinsics, false otherwise.
120 bool isSupported() const;
121
122 /// \brief Lowers this interleaved access group into X86-specific
123 /// instructions/intrinsics.
124 bool lowerIntoOptimizedSequence();
125};
126
127} // end anonymous namespace
128
129bool X86InterleavedAccessGroup::isSupported() const {
130 VectorType *ShuffleVecTy = Shuffles[0]->getType();
131 Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
132 unsigned ShuffleElemSize = DL.getTypeSizeInBits(ShuffleEltTy);
133 unsigned WideInstSize;
134
135 // Currently, lowering is supported for the following vectors:
136 // Stride 4:
137 // 1. Store and load of 4-element vectors of 64 bits on AVX.
138 // 2. Store of 16/32-element vectors of 8 bits on AVX.
139 // Stride 3:
140 // 1. Load of 16/32-element vectors of 8 bits on AVX.
141 if (!Subtarget.hasAVX() || (Factor != 4 && Factor != 3))
142 return false;
143
144 if (isa<LoadInst>(Inst)) {
145 WideInstSize = DL.getTypeSizeInBits(Inst->getType());
146 if (cast<LoadInst>(Inst)->getPointerAddressSpace())
147 return false;
148 } else
149 WideInstSize = DL.getTypeSizeInBits(Shuffles[0]->getType());
150
151 // We support shuffle represents stride 4 for byte type with size of
152 // WideInstSize.
153 if (ShuffleElemSize == 64 && WideInstSize == 1024 && Factor == 4)
154 return true;
155
156 if (ShuffleElemSize == 8 && isa<StoreInst>(Inst) && Factor == 4 &&
157 (WideInstSize == 256 || WideInstSize == 512 || WideInstSize == 1024 ||
158 WideInstSize == 2048))
159 return true;
160
161 if (ShuffleElemSize == 8 && Factor == 3 &&
162 (WideInstSize == 384 || WideInstSize == 768 || WideInstSize == 1536))
163 return true;
164
165 return false;
166}
167
168void X86InterleavedAccessGroup::decompose(
169 Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
170 SmallVectorImpl<Instruction *> &DecomposedVectors) {
171 assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&(static_cast <bool> ((isa<LoadInst>(VecInst) || isa
<ShuffleVectorInst>(VecInst)) && "Expected Load or Shuffle"
) ? void (0) : __assert_fail ("(isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) && \"Expected Load or Shuffle\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 172, __extension__ __PRETTY_FUNCTION__))
172 "Expected Load or Shuffle")(static_cast <bool> ((isa<LoadInst>(VecInst) || isa
<ShuffleVectorInst>(VecInst)) && "Expected Load or Shuffle"
) ? void (0) : __assert_fail ("(isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) && \"Expected Load or Shuffle\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 172, __extension__ __PRETTY_FUNCTION__))
;
173
174 Type *VecWidth = VecInst->getType();
175 (void)VecWidth;
176 assert(VecWidth->isVectorTy() &&(static_cast <bool> (VecWidth->isVectorTy() &&
DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy
) * NumSubVectors && "Invalid Inst-size!!!") ? void (
0) : __assert_fail ("VecWidth->isVectorTy() && DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy) * NumSubVectors && \"Invalid Inst-size!!!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 179, __extension__ __PRETTY_FUNCTION__))
177 DL.getTypeSizeInBits(VecWidth) >=(static_cast <bool> (VecWidth->isVectorTy() &&
DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy
) * NumSubVectors && "Invalid Inst-size!!!") ? void (
0) : __assert_fail ("VecWidth->isVectorTy() && DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy) * NumSubVectors && \"Invalid Inst-size!!!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 179, __extension__ __PRETTY_FUNCTION__))
178 DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&(static_cast <bool> (VecWidth->isVectorTy() &&
DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy
) * NumSubVectors && "Invalid Inst-size!!!") ? void (
0) : __assert_fail ("VecWidth->isVectorTy() && DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy) * NumSubVectors && \"Invalid Inst-size!!!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 179, __extension__ __PRETTY_FUNCTION__))
179 "Invalid Inst-size!!!")(static_cast <bool> (VecWidth->isVectorTy() &&
DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy
) * NumSubVectors && "Invalid Inst-size!!!") ? void (
0) : __assert_fail ("VecWidth->isVectorTy() && DL.getTypeSizeInBits(VecWidth) >= DL.getTypeSizeInBits(SubVecTy) * NumSubVectors && \"Invalid Inst-size!!!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 179, __extension__ __PRETTY_FUNCTION__))
;
180
181 if (auto *SVI = dyn_cast<ShuffleVectorInst>(VecInst)) {
182 Value *Op0 = SVI->getOperand(0);
183 Value *Op1 = SVI->getOperand(1);
184
185 // Generate N(= NumSubVectors) shuffles of T(= SubVecTy) type.
186 for (unsigned i = 0; i < NumSubVectors; ++i)
187 DecomposedVectors.push_back(
188 cast<ShuffleVectorInst>(Builder.CreateShuffleVector(
189 Op0, Op1,
190 createSequentialMask(Builder, Indices[i],
191 SubVecTy->getVectorNumElements(), 0))));
192 return;
193 }
194
195 // Decompose the load instruction.
196 LoadInst *LI = cast<LoadInst>(VecInst);
197 Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
198 Value *VecBasePtr;
199 unsigned int NumLoads = NumSubVectors;
200 // In the case of stride 3 with a vector of 32 elements load the information
201 // in the following way:
202 // [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
203 unsigned VecLength = DL.getTypeSizeInBits(VecWidth);
204 if (VecLength == 768 || VecLength == 1536) {
205 Type *VecTran =
206 VectorType::get(Type::getInt8Ty(LI->getContext()), 16)->getPointerTo();
207 VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecTran);
208 NumLoads = NumSubVectors * (VecLength / 384);
209 } else
210 VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
211 // Generate N loads of T type.
212 for (unsigned i = 0; i < NumLoads; i++) {
213 // TODO: Support inbounds GEP.
214 Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
215 Instruction *NewLoad =
216 Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
217 DecomposedVectors.push_back(NewLoad);
218 }
219}
220
221// Changing the scale of the vector type by reducing the number of elements and
222// doubling the scalar size.
223static MVT scaleVectorType(MVT VT) {
224 unsigned ScalarSize = VT.getVectorElementType().getScalarSizeInBits() * 2;
225 return MVT::getVectorVT(MVT::getIntegerVT(ScalarSize),
226 VT.getVectorNumElements() / 2);
227}
228
// Identity shuffle mask 0..63; prefixes of it are used to concatenate two
// vectors (e.g. the first 32 entries select all elements of two 16-element
// inputs in order). Never written, so make it constexpr/const data.
static constexpr uint32_t Concat[] = {
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
234
235// genShuffleBland - Creates shuffle according to two vectors.This function is
236// only works on instructions with lane inside 256 registers. According to
237// the mask 'Mask' creates a new Mask 'Out' by the offset of the mask. The
238// offset amount depends on the two integer, 'LowOffset' and 'HighOffset'.
239// Where the 'LowOffset' refers to the first vector and the highOffset refers to
240// the second vector.
241// |a0....a5,b0....b4,c0....c4|a16..a21,b16..b20,c16..c20|
242// |c5...c10,a5....a9,b5....b9|c21..c26,a22..a26,b21..b25|
243// |b10..b15,c11..c15,a10..a15|b26..b31,c27..c31,a27..a31|
244// For the sequence to work as a mirror to the load.
245// We must consider the elements order as above.
246// In this function we are combining two types of shuffles.
247// The first one is vpshufed and the second is a type of "blend" shuffle.
248// By computing the shuffle on a sequence of 16 elements(one lane) and add the
249// correct offset. We are creating a vpsuffed + blend sequence between two
250// shuffles.
251static void genShuffleBland(MVT VT, ArrayRef<uint32_t> Mask,
252 SmallVectorImpl<uint32_t> &Out, int LowOffset,
253 int HighOffset) {
254 assert(VT.getSizeInBits() >= 256 &&(static_cast <bool> (VT.getSizeInBits() >= 256 &&
"This function doesn't accept width smaller then 256") ? void
(0) : __assert_fail ("VT.getSizeInBits() >= 256 && \"This function doesn't accept width smaller then 256\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 255, __extension__ __PRETTY_FUNCTION__))
255 "This function doesn't accept width smaller then 256")(static_cast <bool> (VT.getSizeInBits() >= 256 &&
"This function doesn't accept width smaller then 256") ? void
(0) : __assert_fail ("VT.getSizeInBits() >= 256 && \"This function doesn't accept width smaller then 256\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 255, __extension__ __PRETTY_FUNCTION__))
;
256 unsigned NumOfElm = VT.getVectorNumElements();
257 for (unsigned i = 0; i < Mask.size(); i++)
258 Out.push_back(Mask[i] + LowOffset);
259 for (unsigned i = 0; i < Mask.size(); i++)
260 Out.push_back(Mask[i] + HighOffset + NumOfElm);
261}
262
263// reorderSubVector returns the data to is the original state. And de-facto is
264// the opposite of the function concatSubVector.
265
266// For VecElems = 16
267// Invec[0] - |0| TransposedMatrix[0] - |0|
268// Invec[1] - |1| => TransposedMatrix[1] - |1|
269// Invec[2] - |2| TransposedMatrix[2] - |2|
270
271// For VecElems = 32
272// Invec[0] - |0|3| TransposedMatrix[0] - |0|1|
273// Invec[1] - |1|4| => TransposedMatrix[1] - |2|3|
274// Invec[2] - |2|5| TransposedMatrix[2] - |4|5|
275
276// For VecElems = 64
277// Invec[0] - |0|3|6|9 | TransposedMatrix[0] - |0|1|2 |3 |
278// Invec[1] - |1|4|7|10| => TransposedMatrix[1] - |4|5|6 |7 |
279// Invec[2] - |2|5|8|11| TransposedMatrix[2] - |8|9|10|11|
280
281static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
282 ArrayRef<Value *> Vec, ArrayRef<uint32_t> VPShuf,
283 unsigned VecElems, unsigned Stride,
284 IRBuilder<> Builder) {
285
286 if (VecElems == 16) {
287 for (unsigned i = 0; i < Stride; i++)
288 TransposedMatrix[i] = Builder.CreateShuffleVector(
289 Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
290 return;
291 }
292
293 SmallVector<uint32_t, 32> OptimizeShuf;
294 Value *Temp[8];
295
296 for (unsigned i = 0; i < (VecElems / 16) * Stride; i += 2) {
297 genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,
298 (i + 1) / Stride * 16);
299 Temp[i / 2] = Builder.CreateShuffleVector(
300 Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
301 OptimizeShuf.clear();
302 }
303
304 if (VecElems == 32) {
305 std::copy(Temp, Temp + Stride, TransposedMatrix.begin());
306 return;
307 }
308 else
309 for (unsigned i = 0; i < Stride; i++)
310 TransposedMatrix[i] =
311 Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
312}
313
314void X86InterleavedAccessGroup::interleave8bitStride4VF8(
315 ArrayRef<Instruction *> Matrix,
316 SmallVectorImpl<Value *> &TransposedMatrix) {
317 // Assuming we start from the following vectors:
318 // Matrix[0]= c0 c1 c2 c3 c4 ... c7
319 // Matrix[1]= m0 m1 m2 m3 m4 ... m7
320 // Matrix[2]= y0 y1 y2 y3 y4 ... y7
321 // Matrix[3]= k0 k1 k2 k3 k4 ... k7
322
323 MVT VT = MVT::v8i16;
324 TransposedMatrix.resize(2);
325 SmallVector<uint32_t, 16> MaskLow;
326 SmallVector<uint32_t, 32> MaskLowTemp1, MaskLowWord;
327 SmallVector<uint32_t, 32> MaskHighTemp1, MaskHighWord;
328
329 for (unsigned i = 0; i < 8; ++i) {
330 MaskLow.push_back(i);
331 MaskLow.push_back(i + 8);
332 }
333
334 createUnpackShuffleMask<uint32_t>(VT, MaskLowTemp1, true, false);
335 createUnpackShuffleMask<uint32_t>(VT, MaskHighTemp1, false, false);
336 scaleShuffleMask<uint32_t>(2, MaskHighTemp1, MaskHighWord);
337 scaleShuffleMask<uint32_t>(2, MaskLowTemp1, MaskLowWord);
338 // IntrVec1Low = c0 m0 c1 m1 c2 m2 c3 m3 c4 m4 c5 m5 c6 m6 c7 m7
339 // IntrVec2Low = y0 k0 y1 k1 y2 k2 y3 k3 y4 k4 y5 k5 y6 k6 y7 k7
340 Value *IntrVec1Low =
341 Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
342 Value *IntrVec2Low =
343 Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);
344
345 // TransposedMatrix[0] = c0 m0 y0 k0 c1 m1 y1 k1 c2 m2 y2 k2 c3 m3 y3 k3
346 // TransposedMatrix[1] = c4 m4 y4 k4 c5 m5 y5 k5 c6 m6 y6 k6 c7 m7 y7 k7
347
348 TransposedMatrix[0] =
349 Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low, MaskLowWord);
350 TransposedMatrix[1] =
351 Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low, MaskHighWord);
352}
353
354void X86InterleavedAccessGroup::interleave8bitStride4(
355 ArrayRef<Instruction *> Matrix, SmallVectorImpl<Value *> &TransposedMatrix,
356 unsigned NumOfElm) {
357 // Example: Assuming we start from the following vectors:
358 // Matrix[0]= c0 c1 c2 c3 c4 ... c31
359 // Matrix[1]= m0 m1 m2 m3 m4 ... m31
360 // Matrix[2]= y0 y1 y2 y3 y4 ... y31
361 // Matrix[3]= k0 k1 k2 k3 k4 ... k31
362
363 MVT VT = MVT::getVectorVT(MVT::i8, NumOfElm);
364 MVT HalfVT = scaleVectorType(VT);
365
366 TransposedMatrix.resize(4);
367 SmallVector<uint32_t, 32> MaskHigh;
368 SmallVector<uint32_t, 32> MaskLow;
369 SmallVector<uint32_t, 32> LowHighMask[2];
370 SmallVector<uint32_t, 32> MaskHighTemp;
371 SmallVector<uint32_t, 32> MaskLowTemp;
372
373 // MaskHighTemp and MaskLowTemp built in the vpunpckhbw and vpunpcklbw X86
374 // shuffle pattern.
375
376 createUnpackShuffleMask<uint32_t>(VT, MaskLow, true, false);
377 createUnpackShuffleMask<uint32_t>(VT, MaskHigh, false, false);
378
379 // MaskHighTemp1 and MaskLowTemp1 built in the vpunpckhdw and vpunpckldw X86
380 // shuffle pattern.
381
382 createUnpackShuffleMask<uint32_t>(HalfVT, MaskLowTemp, true, false);
383 createUnpackShuffleMask<uint32_t>(HalfVT, MaskHighTemp, false, false);
384 scaleShuffleMask<uint32_t>(2, MaskLowTemp, LowHighMask[0]);
385 scaleShuffleMask<uint32_t>(2, MaskHighTemp, LowHighMask[1]);
386
387 // IntrVec1Low = c0 m0 c1 m1 ... c7 m7 | c16 m16 c17 m17 ... c23 m23
388 // IntrVec1High = c8 m8 c9 m9 ... c15 m15 | c24 m24 c25 m25 ... c31 m31
389 // IntrVec2Low = y0 k0 y1 k1 ... y7 k7 | y16 k16 y17 k17 ... y23 k23
390 // IntrVec2High = y8 k8 y9 k9 ... y15 k15 | y24 k24 y25 k25 ... y31 k31
391 Value *IntrVec[4];
392
393 IntrVec[0] = Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
394 IntrVec[1] = Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskHigh);
395 IntrVec[2] = Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);
396 IntrVec[3] = Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskHigh);
397
398 // cmyk4 cmyk5 cmyk6 cmyk7 | cmyk20 cmyk21 cmyk22 cmyk23
399 // cmyk12 cmyk13 cmyk14 cmyk15 | cmyk28 cmyk29 cmyk30 cmyk31
400 // cmyk0 cmyk1 cmyk2 cmyk3 | cmyk16 cmyk17 cmyk18 cmyk19
401 // cmyk8 cmyk9 cmyk10 cmyk11 | cmyk24 cmyk25 cmyk26 cmyk27
402
403 Value *VecOut[4];
404 for (int i = 0; i < 4; i++)
405 VecOut[i] = Builder.CreateShuffleVector(IntrVec[i / 2], IntrVec[i / 2 + 2],
406 LowHighMask[i % 2]);
407
408 // cmyk0 cmyk1 cmyk2 cmyk3 | cmyk4 cmyk5 cmyk6 cmyk7
409 // cmyk8 cmyk9 cmyk10 cmyk11 | cmyk12 cmyk13 cmyk14 cmyk15
410 // cmyk16 cmyk17 cmyk18 cmyk19 | cmyk20 cmyk21 cmyk22 cmyk23
411 // cmyk24 cmyk25 cmyk26 cmyk27 | cmyk28 cmyk29 cmyk30 cmyk31
412
413 if (VT == MVT::v16i8) {
414 std::copy(VecOut, VecOut + 4, TransposedMatrix.begin());
415 return;
416 }
417
418 reorderSubVector(VT, TransposedMatrix, VecOut, makeArrayRef(Concat, 16),
419 NumOfElm, 4, Builder);
420}
421
422// createShuffleStride returns shuffle mask of size N.
423// The shuffle pattern is as following :
424// {0, Stride%(VF/Lane), (2*Stride%(VF/Lane))...(VF*Stride/Lane)%(VF/Lane),
425// (VF/ Lane) ,(VF / Lane)+Stride%(VF/Lane),...,
426// (VF / Lane)+(VF*Stride/Lane)%(VF/Lane)}
427// Where Lane is the # of lanes in a register:
428// VectorSize = 128 => Lane = 1
429// VectorSize = 256 => Lane = 2
430// For example shuffle pattern for VF 16 register size 256 -> lanes = 2
431// {<[0|3|6|1|4|7|2|5]-[8|11|14|9|12|15|10|13]>}
432static void createShuffleStride(MVT VT, int Stride,
433 SmallVectorImpl<uint32_t> &Mask) {
434 int VectorSize = VT.getSizeInBits();
435 int VF = VT.getVectorNumElements();
436 int LaneCount = std::max(VectorSize / 128, 1);
437 for (int Lane = 0; Lane < LaneCount; Lane++)
438 for (int i = 0, LaneSize = VF / LaneCount; i != LaneSize; ++i)
439 Mask.push_back((i * Stride) % LaneSize + LaneSize * Lane);
440}
441
442// setGroupSize sets 'SizeInfo' to the size(number of elements) of group
443// inside mask a shuffleMask. A mask contains exactly 3 groups, where
444// each group is a monotonically increasing sequence with stride 3.
445// For example shuffleMask {0,3,6,1,4,7,2,5} => {3,3,2}
446static void setGroupSize(MVT VT, SmallVectorImpl<uint32_t> &SizeInfo) {
447 int VectorSize = VT.getSizeInBits();
448 int VF = VT.getVectorNumElements() / std::max(VectorSize / 128, 1);
449 for (int i = 0, FirstGroupElement = 0; i < 3; i++) {
450 int GroupSize = std::ceil((VF - FirstGroupElement) / 3.0);
451 SizeInfo.push_back(GroupSize);
452 FirstGroupElement = ((GroupSize)*3 + FirstGroupElement) % VF;
453 }
454}
455
456// DecodePALIGNRMask returns the shuffle mask of vpalign instruction.
457// vpalign works according to lanes
458// Where Lane is the # of lanes in a register:
459// VectorWide = 128 => Lane = 1
460// VectorWide = 256 => Lane = 2
461// For Lane = 1 shuffle pattern is: {DiffToJump,...,DiffToJump+VF-1}.
462// For Lane = 2 shuffle pattern is:
463// {DiffToJump,...,VF/2-1,VF,...,DiffToJump+VF-1}.
464// Imm variable sets the offset amount. The result of the
465// function is stored inside ShuffleMask vector and it built as described in
466// the begin of the description. AlignDirection is a boolean that indecat the
467// direction of the alignment. (false - align to the "right" side while true -
468// align to the "left" side)
469static void DecodePALIGNRMask(MVT VT, unsigned Imm,
470 SmallVectorImpl<uint32_t> &ShuffleMask,
471 bool AlignDirection = true, bool Unary = false) {
472 unsigned NumElts = VT.getVectorNumElements();
473 unsigned NumLanes = std::max((int)VT.getSizeInBits() / 128, 1);
474 unsigned NumLaneElts = NumElts / NumLanes;
475
476 Imm = AlignDirection ? Imm : (NumLaneElts - Imm);
477 unsigned Offset = Imm * (VT.getScalarSizeInBits() / 8);
478
479 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
480 for (unsigned i = 0; i != NumLaneElts; ++i) {
481 unsigned Base = i + Offset;
482 // if i+offset is out of this lane then we actually need the other source
483 // If Unary the other source is the first source.
484 if (Base >= NumLaneElts)
485 Base = Unary ? Base % NumLaneElts : Base + NumElts - NumLaneElts;
486 ShuffleMask.push_back(Base + l);
487 }
488 }
489}
490
491// concatSubVector - The function rebuilds the data to a correct expected
492// order. An assumption(The shape of the matrix) was taken for the
493// deinterleaved to work with lane's instructions like 'vpalign' or 'vphuf'.
494// This function ensures that the data is built in correct way for the lane
495// instructions. Each lane inside the vector is a 128-bit length.
496//
497// The 'InVec' argument contains the data in increasing order. In InVec[0] You
498// can find the first 128 bit data. The number of different lanes inside a
499// vector depends on the 'VecElems'.In general, the formula is
500// VecElems * type / 128. The size of the array 'InVec' depends and equal to
501// 'VecElems'.
502
503// For VecElems = 16
504// Invec[0] - |0| Vec[0] - |0|
505// Invec[1] - |1| => Vec[1] - |1|
506// Invec[2] - |2| Vec[2] - |2|
507
508// For VecElems = 32
509// Invec[0] - |0|1| Vec[0] - |0|3|
510// Invec[1] - |2|3| => Vec[1] - |1|4|
511// Invec[2] - |4|5| Vec[2] - |2|5|
512
513// For VecElems = 64
514// Invec[0] - |0|1|2 |3 | Vec[0] - |0|3|6|9 |
515// Invec[1] - |4|5|6 |7 | => Vec[1] - |1|4|7|10|
516// Invec[2] - |8|9|10|11| Vec[2] - |2|5|8|11|
517
518static void concatSubVector(Value **Vec, ArrayRef<Instruction *> InVec,
519 unsigned VecElems, IRBuilder<> Builder) {
520 if (VecElems == 16) {
5
Assuming 'VecElems' is not equal to 16
6
Taking false branch
521 for (int i = 0; i < 3; i++)
522 Vec[i] = InVec[i];
523 return;
524 }
525
526 for (unsigned j = 0; j < VecElems / 32; j++)
7
Assuming the condition is true
8
Loop condition is true. Entering loop body
149
Assuming the condition is false
150
Loop condition is false. Execution continues on line 531
527 for (int i = 0; i < 3; i++)
9
Loop condition is true. Entering loop body
56
Loop condition is true. Entering loop body
102
Loop condition is true. Entering loop body
148
Loop condition is false. Execution continues on line 526
528 Vec[i + j * 3] = Builder.CreateShuffleVector(
12
Calling 'IRBuilder::CreateShuffleVector'
55
Returning from 'IRBuilder::CreateShuffleVector'
59
Calling 'IRBuilder::CreateShuffleVector'
101
Returning from 'IRBuilder::CreateShuffleVector'
105
Calling 'IRBuilder::CreateShuffleVector'
147
Returning from 'IRBuilder::CreateShuffleVector'
529 InVec[j * 6 + i], InVec[j * 6 + i + 3], makeArrayRef(Concat, 32));
10
Calling 'makeArrayRef'
11
Returning from 'makeArrayRef'
57
Calling 'makeArrayRef'
58
Returning from 'makeArrayRef'
103
Calling 'makeArrayRef'
104
Returning from 'makeArrayRef'
530
531 if (VecElems == 32)
151
Assuming 'VecElems' is not equal to 32
152
Taking false branch
532 return;
533
534 for (int i = 0; i < 3; i++)
153
Loop condition is true. Entering loop body
535 Vec[i] = Builder.CreateShuffleVector(Vec[i], Vec[i + 3], Concat);
154
2nd function call argument is an uninitialized value
536}
537
538void X86InterleavedAccessGroup::deinterleave8bitStride3(
539 ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
540 unsigned VecElems) {
541 // Example: Assuming we start from the following vectors:
542 // Matrix[0]= a0 b0 c0 a1 b1 c1 a2 b2
543 // Matrix[1]= c2 a3 b3 c3 a4 b4 c4 a5
544 // Matrix[2]= b5 c5 a6 b6 c6 a7 b7 c7
545
546 TransposedMatrix.resize(3);
547 SmallVector<uint32_t, 32> VPShuf;
548 SmallVector<uint32_t, 32> VPAlign[2];
549 SmallVector<uint32_t, 32> VPAlign2;
550 SmallVector<uint32_t, 32> VPAlign3;
551 SmallVector<uint32_t, 3> GroupSize;
552 Value *Vec[6], *TempVector[3];
553
554 MVT VT = MVT::getVT(Shuffles[0]->getType());
555
556 createShuffleStride(VT, 3, VPShuf);
557 setGroupSize(VT, GroupSize);
558
559 for (int i = 0; i < 2; i++)
1
Loop condition is true. Entering loop body
2
Loop condition is true. Entering loop body
3
Loop condition is false. Execution continues on line 562
560 DecodePALIGNRMask(VT, GroupSize[2 - i], VPAlign[i], false);
561
562 DecodePALIGNRMask(VT, GroupSize[2] + GroupSize[1], VPAlign2, true, true);
563 DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, true, true);
564
565 concatSubVector(Vec, InVec, VecElems, Builder);
4
Calling 'concatSubVector'
566 // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
567 // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
568 // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
569
570 for (int i = 0; i < 3; i++)
571 Vec[i] = Builder.CreateShuffleVector(
572 Vec[i], UndefValue::get(Vec[0]->getType()), VPShuf);
573
574 // TempVector[0]= a6 a7 a0 a1 a2 b0 b1 b2
575 // TempVector[1]= c0 c1 c2 c3 c4 a3 a4 a5
576 // TempVector[2]= b3 b4 b5 b6 b7 c5 c6 c7
577
578 for (int i = 0; i < 3; i++)
579 TempVector[i] =
580 Builder.CreateShuffleVector(Vec[(i + 2) % 3], Vec[i], VPAlign[0]);
581
582 // Vec[0]= a3 a4 a5 a6 a7 a0 a1 a2
583 // Vec[1]= c5 c6 c7 c0 c1 c2 c3 c4
584 // Vec[2]= b0 b1 b2 b3 b4 b5 b6 b7
585
586 for (int i = 0; i < 3; i++)
587 Vec[i] = Builder.CreateShuffleVector(TempVector[(i + 1) % 3], TempVector[i],
588 VPAlign[1]);
589
590 // TransposedMatrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
591 // TransposedMatrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
592 // TransposedMatrix[2]= c0 c1 c2 c3 c4 c5 c6 c7
593
594 Value *TempVec = Builder.CreateShuffleVector(
595 Vec[1], UndefValue::get(Vec[1]->getType()), VPAlign3);
596 TransposedMatrix[0] = Builder.CreateShuffleVector(
597 Vec[0], UndefValue::get(Vec[1]->getType()), VPAlign2);
598 TransposedMatrix[1] = VecElems == 8 ? Vec[2] : TempVec;
599 TransposedMatrix[2] = VecElems == 8 ? TempVec : Vec[2];
600}
601
602// group2Shuffle reorder the shuffle stride back into continuous order.
603// For example For VF16 with Mask1 = {0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13} =>
604// MaskResult = {0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5}.
605static void group2Shuffle(MVT VT, SmallVectorImpl<uint32_t> &Mask,
606 SmallVectorImpl<uint32_t> &Output) {
607 int IndexGroup[3] = {0, 0, 0};
608 int Index = 0;
609 int VectorWidth = VT.getSizeInBits();
610 int VF = VT.getVectorNumElements();
611 // Find the index of the different groups.
612 int Lane = (VectorWidth / 128 > 0) ? VectorWidth / 128 : 1;
613 for (int i = 0; i < 3; i++) {
614 IndexGroup[(Index * 3) % (VF / Lane)] = Index;
615 Index += Mask[i];
616 }
617 // According to the index compute the convert mask.
618 for (int i = 0; i < VF / Lane; i++) {
619 Output.push_back(IndexGroup[i % 3]);
620 IndexGroup[i % 3]++;
621 }
622}
623
624void X86InterleavedAccessGroup::interleave8bitStride3(
625 ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
626 unsigned VecElems) {
627 // Example: Assuming we start from the following vectors:
628 // Matrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
629 // Matrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
630 // Matrix[2]= c0 c1 c2 c3 c3 a7 b7 c7
631
632 TransposedMatrix.resize(3);
633 SmallVector<uint32_t, 3> GroupSize;
634 SmallVector<uint32_t, 32> VPShuf;
635 SmallVector<uint32_t, 32> VPAlign[3];
636 SmallVector<uint32_t, 32> VPAlign2;
637 SmallVector<uint32_t, 32> VPAlign3;
638
639 Value *Vec[3], *TempVector[3];
640 MVT VT = MVT::getVectorVT(MVT::i8, VecElems);
641
642 setGroupSize(VT, GroupSize);
643
644 for (int i = 0; i < 3; i++)
645 DecodePALIGNRMask(VT, GroupSize[i], VPAlign[i]);
646
647 DecodePALIGNRMask(VT, GroupSize[1] + GroupSize[2], VPAlign2, false, true);
648 DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, false, true);
649
650 // Vec[0]= a3 a4 a5 a6 a7 a0 a1 a2
651 // Vec[1]= c5 c6 c7 c0 c1 c2 c3 c4
652 // Vec[2]= b0 b1 b2 b3 b4 b5 b6 b7
653
654 Vec[0] = Builder.CreateShuffleVector(
655 InVec[0], UndefValue::get(InVec[0]->getType()), VPAlign2);
656 Vec[1] = Builder.CreateShuffleVector(
657 InVec[1], UndefValue::get(InVec[1]->getType()), VPAlign3);
658 Vec[2] = InVec[2];
659
660 // Vec[0]= a6 a7 a0 a1 a2 b0 b1 b2
661 // Vec[1]= c0 c1 c2 c3 c4 a3 a4 a5
662 // Vec[2]= b3 b4 b5 b6 b7 c5 c6 c7
663
664 for (int i = 0; i < 3; i++)
665 TempVector[i] =
666 Builder.CreateShuffleVector(Vec[i], Vec[(i + 2) % 3], VPAlign[1]);
667
668 // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
669 // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
670 // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
671
672 for (int i = 0; i < 3; i++)
673 Vec[i] = Builder.CreateShuffleVector(TempVector[i], TempVector[(i + 1) % 3],
674 VPAlign[2]);
675
676 // TransposedMatrix[0] = a0 b0 c0 a1 b1 c1 a2 b2
677 // TransposedMatrix[1] = c2 a3 b3 c3 a4 b4 c4 a5
678 // TransposedMatrix[2] = b5 c5 a6 b6 c6 a7 b7 c7
679
680 unsigned NumOfElm = VT.getVectorNumElements();
681 group2Shuffle(VT, GroupSize, VPShuf);
682 reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm,3, Builder);
683}
684
685void X86InterleavedAccessGroup::transpose_4x4(
686 ArrayRef<Instruction *> Matrix,
687 SmallVectorImpl<Value *> &TransposedMatrix) {
688 assert(Matrix.size() == 4 && "Invalid matrix size")(static_cast <bool> (Matrix.size() == 4 && "Invalid matrix size"
) ? void (0) : __assert_fail ("Matrix.size() == 4 && \"Invalid matrix size\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 688, __extension__ __PRETTY_FUNCTION__))
;
689 TransposedMatrix.resize(4);
690
691 // dst = src1[0,1],src2[0,1]
692 uint32_t IntMask1[] = {0, 1, 4, 5};
693 ArrayRef<uint32_t> Mask = makeArrayRef(IntMask1, 4);
694 Value *IntrVec1 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
695 Value *IntrVec2 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
696
697 // dst = src1[2,3],src2[2,3]
698 uint32_t IntMask2[] = {2, 3, 6, 7};
699 Mask = makeArrayRef(IntMask2, 4);
700 Value *IntrVec3 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
701 Value *IntrVec4 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
702
703 // dst = src1[0],src2[0],src1[2],src2[2]
704 uint32_t IntMask3[] = {0, 4, 2, 6};
705 Mask = makeArrayRef(IntMask3, 4);
706 TransposedMatrix[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
707 TransposedMatrix[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
708
709 // dst = src1[1],src2[1],src1[3],src2[3]
710 uint32_t IntMask4[] = {1, 5, 3, 7};
711 Mask = makeArrayRef(IntMask4, 4);
712 TransposedMatrix[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
713 TransposedMatrix[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
714}
715
716// Lowers this interleaved access group into X86-specific
717// instructions/intrinsics.
718bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
719 SmallVector<Instruction *, 4> DecomposedVectors;
720 SmallVector<Value *, 4> TransposedVectors;
721 VectorType *ShuffleTy = Shuffles[0]->getType();
722
723 if (isa<LoadInst>(Inst)) {
724 // Try to generate target-sized register(/instruction).
725 decompose(Inst, Factor, ShuffleTy, DecomposedVectors);
726
727 Type *ShuffleEltTy = Inst->getType();
728 unsigned NumSubVecElems = ShuffleEltTy->getVectorNumElements() / Factor;
729 // Perform matrix-transposition in order to compute interleaved
730 // results by generating some sort of (optimized) target-specific
731 // instructions.
732
733 switch (NumSubVecElems) {
734 default:
735 return false;
736 case 4:
737 transpose_4x4(DecomposedVectors, TransposedVectors);
738 break;
739 case 8:
740 case 16:
741 case 32:
742 case 64:
743 deinterleave8bitStride3(DecomposedVectors, TransposedVectors,
744 NumSubVecElems);
745 break;
746 }
747
748 // Now replace the unoptimized-interleaved-vectors with the
749 // transposed-interleaved vectors.
750 for (unsigned i = 0, e = Shuffles.size(); i < e; ++i)
751 Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);
752
753 return true;
754 }
755
756 Type *ShuffleEltTy = ShuffleTy->getVectorElementType();
757 unsigned NumSubVecElems = ShuffleTy->getVectorNumElements() / Factor;
758
759 // Lower the interleaved stores:
760 // 1. Decompose the interleaved wide shuffle into individual shuffle
761 // vectors.
762 decompose(Shuffles[0], Factor, VectorType::get(ShuffleEltTy, NumSubVecElems),
763 DecomposedVectors);
764
765 // 2. Transpose the interleaved-vectors into vectors of contiguous
766 // elements.
767 switch (NumSubVecElems) {
768 case 4:
769 transpose_4x4(DecomposedVectors, TransposedVectors);
770 break;
771 case 8:
772 interleave8bitStride4VF8(DecomposedVectors, TransposedVectors);
773 break;
774 case 16:
775 case 32:
776 case 64:
777 if (Factor == 4)
778 interleave8bitStride4(DecomposedVectors, TransposedVectors,
779 NumSubVecElems);
780 if (Factor == 3)
781 interleave8bitStride3(DecomposedVectors, TransposedVectors,
782 NumSubVecElems);
783 break;
784 default:
785 return false;
786 }
787
788 // 3. Concatenate the contiguous-vectors back into a wide vector.
789 Value *WideVec = concatenateVectors(Builder, TransposedVectors);
790
791 // 4. Generate a store instruction for wide-vec.
792 StoreInst *SI = cast<StoreInst>(Inst);
793 Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(),
794 SI->getAlignment());
795
796 return true;
797}
798
799// Lower interleaved load(s) into target specific instructions/
800// intrinsics. Lowering sequence varies depending on the vector-types, factor,
801// number of shuffles and ISA.
802// Currently, lowering is supported for 4x64 bits with Factor = 4 on AVX.
803bool X86TargetLowering::lowerInterleavedLoad(
804 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
805 ArrayRef<unsigned> Indices, unsigned Factor) const {
806 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&(static_cast <bool> (Factor >= 2 && Factor <=
getMaxSupportedInterleaveFactor() && "Invalid interleave factor"
) ? void (0) : __assert_fail ("Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && \"Invalid interleave factor\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 807, __extension__ __PRETTY_FUNCTION__))
807 "Invalid interleave factor")(static_cast <bool> (Factor >= 2 && Factor <=
getMaxSupportedInterleaveFactor() && "Invalid interleave factor"
) ? void (0) : __assert_fail ("Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && \"Invalid interleave factor\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 807, __extension__ __PRETTY_FUNCTION__))
;
808 assert(!Shuffles.empty() && "Empty shufflevector input")(static_cast <bool> (!Shuffles.empty() && "Empty shufflevector input"
) ? void (0) : __assert_fail ("!Shuffles.empty() && \"Empty shufflevector input\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 808, __extension__ __PRETTY_FUNCTION__))
;
809 assert(Shuffles.size() == Indices.size() &&(static_cast <bool> (Shuffles.size() == Indices.size() &&
"Unmatched number of shufflevectors and indices") ? void (0)
: __assert_fail ("Shuffles.size() == Indices.size() && \"Unmatched number of shufflevectors and indices\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 810, __extension__ __PRETTY_FUNCTION__))
810 "Unmatched number of shufflevectors and indices")(static_cast <bool> (Shuffles.size() == Indices.size() &&
"Unmatched number of shufflevectors and indices") ? void (0)
: __assert_fail ("Shuffles.size() == Indices.size() && \"Unmatched number of shufflevectors and indices\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 810, __extension__ __PRETTY_FUNCTION__))
;
811
812 // Create an interleaved access group.
813 IRBuilder<> Builder(LI);
814 X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
815 Builder);
816
817 return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
818}
819
820bool X86TargetLowering::lowerInterleavedStore(StoreInst *SI,
821 ShuffleVectorInst *SVI,
822 unsigned Factor) const {
823 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&(static_cast <bool> (Factor >= 2 && Factor <=
getMaxSupportedInterleaveFactor() && "Invalid interleave factor"
) ? void (0) : __assert_fail ("Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && \"Invalid interleave factor\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 824, __extension__ __PRETTY_FUNCTION__))
824 "Invalid interleave factor")(static_cast <bool> (Factor >= 2 && Factor <=
getMaxSupportedInterleaveFactor() && "Invalid interleave factor"
) ? void (0) : __assert_fail ("Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && \"Invalid interleave factor\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 824, __extension__ __PRETTY_FUNCTION__))
;
825
826 assert(SVI->getType()->getVectorNumElements() % Factor == 0 &&(static_cast <bool> (SVI->getType()->getVectorNumElements
() % Factor == 0 && "Invalid interleaved store") ? void
(0) : __assert_fail ("SVI->getType()->getVectorNumElements() % Factor == 0 && \"Invalid interleaved store\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 827, __extension__ __PRETTY_FUNCTION__))
827 "Invalid interleaved store")(static_cast <bool> (SVI->getType()->getVectorNumElements
() % Factor == 0 && "Invalid interleaved store") ? void
(0) : __assert_fail ("SVI->getType()->getVectorNumElements() % Factor == 0 && \"Invalid interleaved store\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Target/X86/X86InterleavedAccess.cpp"
, 827, __extension__ __PRETTY_FUNCTION__))
;
828
829 // Holds the indices of SVI that correspond to the starting index of each
830 // interleaved shuffle.
831 SmallVector<unsigned, 4> Indices;
832 auto Mask = SVI->getShuffleMask();
833 for (unsigned i = 0; i < Factor; i++)
834 Indices.push_back(Mask[i]);
835
836 ArrayRef<ShuffleVectorInst *> Shuffles = makeArrayRef(SVI);
837
838 // Create an interleaved access group.
839 IRBuilder<> Builder(SI);
840 X86InterleavedAccessGroup Grp(SI, Shuffles, Indices, Factor, Subtarget,
841 Builder);
842
843 return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
844}

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h

1//===---- llvm/IRBuilder.h - Builder for LLVM Instructions ------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the IRBuilder class, which is used as a convenient way
11// to create LLVM instructions with a consistent and simplified interface.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_IRBUILDER_H
16#define LLVM_IR_IRBUILDER_H
17
18#include "llvm-c/Types.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugLoc.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/GlobalVariable.h"
32#include "llvm/IR/InstrTypes.h"
33#include "llvm/IR/Instruction.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/LLVMContext.h"
37#include "llvm/IR/Module.h"
38#include "llvm/IR/Operator.h"
39#include "llvm/IR/Type.h"
40#include "llvm/IR/Value.h"
41#include "llvm/IR/ValueHandle.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/CBindingWrapping.h"
44#include "llvm/Support/Casting.h"
45#include <algorithm>
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50
51namespace llvm {
52
53class APInt;
54class MDNode;
55class Module;
56class Use;
57
58/// \brief This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64protected:
65 void InsertHelper(Instruction *I, const Twine &Name,
66 BasicBlock *BB, BasicBlock::iterator InsertPt) const {
67 if (BB) BB->getInstList().insert(InsertPt, I);
38
Assuming 'BB' is null
39
Taking false branch
85
Taking false branch
131
Taking false branch
68 I->setName(Name);
69 }
70};
71
72/// Provides an 'InsertHelper' that calls a user-provided callback after
73/// performing the default insertion.
74class IRBuilderCallbackInserter : IRBuilderDefaultInserter {
75 std::function<void(Instruction *)> Callback;
76
77public:
78 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
79 : Callback(std::move(Callback)) {}
80
81protected:
82 void InsertHelper(Instruction *I, const Twine &Name,
83 BasicBlock *BB, BasicBlock::iterator InsertPt) const {
84 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
85 Callback(I);
86 }
87};
88
89/// \brief Common base class shared among various IRBuilders.
90class IRBuilderBase {
91 DebugLoc CurDbgLocation;
92
93protected:
94 BasicBlock *BB;
95 BasicBlock::iterator InsertPt;
96 LLVMContext &Context;
97
98 MDNode *DefaultFPMathTag;
99 FastMathFlags FMF;
100
101 ArrayRef<OperandBundleDef> DefaultOperandBundles;
102
103public:
104 IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr,
105 ArrayRef<OperandBundleDef> OpBundles = None)
106 : Context(context), DefaultFPMathTag(FPMathTag),
107 DefaultOperandBundles(OpBundles) {
108 ClearInsertionPoint();
109 }
110
111 //===--------------------------------------------------------------------===//
112 // Builder configuration methods
113 //===--------------------------------------------------------------------===//
114
115 /// \brief Clear the insertion point: created instructions will not be
116 /// inserted into a block.
117 void ClearInsertionPoint() {
118 BB = nullptr;
119 InsertPt = BasicBlock::iterator();
120 }
121
122 BasicBlock *GetInsertBlock() const { return BB; }
123 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
124 LLVMContext &getContext() const { return Context; }
125
126 /// \brief This specifies that created instructions should be appended to the
127 /// end of the specified block.
128 void SetInsertPoint(BasicBlock *TheBB) {
129 BB = TheBB;
130 InsertPt = BB->end();
131 }
132
133 /// \brief This specifies that created instructions should be inserted before
134 /// the specified instruction.
135 void SetInsertPoint(Instruction *I) {
136 BB = I->getParent();
137 InsertPt = I->getIterator();
138 assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() &&
"Can't read debug loc from end()") ? void (0) : __assert_fail
("InsertPt != BB->end() && \"Can't read debug loc from end()\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 138, __extension__ __PRETTY_FUNCTION__))
;
139 SetCurrentDebugLocation(I->getDebugLoc());
140 }
141
142 /// \brief This specifies that created instructions should be inserted at the
143 /// specified point.
144 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
145 BB = TheBB;
146 InsertPt = IP;
147 if (IP != TheBB->end())
148 SetCurrentDebugLocation(IP->getDebugLoc());
149 }
150
151 /// \brief Set location information used by debugging information.
152 void SetCurrentDebugLocation(DebugLoc L) { CurDbgLocation = std::move(L); }
153
154 /// \brief Get location information used by debugging information.
155 const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
156
157 /// \brief If this builder has a current debug location, set it on the
158 /// specified instruction.
159 void SetInstDebugLocation(Instruction *I) const {
160 if (CurDbgLocation)
42
Calling 'DebugLoc::operator bool'
49
Returning from 'DebugLoc::operator bool'
50
Assuming the condition is false
51
Taking false branch
88
Calling 'DebugLoc::operator bool'
95
Returning from 'DebugLoc::operator bool'
96
Assuming the condition is false
97
Taking false branch
134
Calling 'DebugLoc::operator bool'
141
Returning from 'DebugLoc::operator bool'
142
Assuming the condition is false
143
Taking false branch
161 I->setDebugLoc(CurDbgLocation);
162 }
163
164 /// \brief Get the return type of the current function that we're emitting
165 /// into.
166 Type *getCurrentFunctionReturnType() const;
167
168 /// InsertPoint - A saved insertion point.
169 class InsertPoint {
170 BasicBlock *Block = nullptr;
171 BasicBlock::iterator Point;
172
173 public:
174 /// \brief Creates a new insertion point which doesn't point to anything.
175 InsertPoint() = default;
176
177 /// \brief Creates a new insertion point at the given location.
178 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
179 : Block(InsertBlock), Point(InsertPoint) {}
180
181 /// \brief Returns true if this insert point is set.
182 bool isSet() const { return (Block != nullptr); }
183
184 BasicBlock *getBlock() const { return Block; }
185 BasicBlock::iterator getPoint() const { return Point; }
186 };
187
188 /// \brief Returns the current insert point.
189 InsertPoint saveIP() const {
190 return InsertPoint(GetInsertBlock(), GetInsertPoint());
191 }
192
193 /// \brief Returns the current insert point, clearing it in the process.
194 InsertPoint saveAndClearIP() {
195 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
196 ClearInsertionPoint();
197 return IP;
198 }
199
200 /// \brief Sets the current insert point to a previously-saved location.
201 void restoreIP(InsertPoint IP) {
202 if (IP.isSet())
203 SetInsertPoint(IP.getBlock(), IP.getPoint());
204 else
205 ClearInsertionPoint();
206 }
207
208 /// \brief Get the floating point math metadata being used.
209 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
210
211 /// \brief Get the flags to be applied to created floating point ops
212 FastMathFlags getFastMathFlags() const { return FMF; }
213
214 /// \brief Clear the fast-math flags.
215 void clearFastMathFlags() { FMF.clear(); }
216
217 /// \brief Set the floating point math metadata to be used.
218 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
219
220 /// \brief Set the fast-math flags to be used with generated fp-math operators
221 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
222
223 //===--------------------------------------------------------------------===//
224 // RAII helpers.
225 //===--------------------------------------------------------------------===//
226
227 // \brief RAII object that stores the current insertion point and restores it
228 // when the object is destroyed. This includes the debug location.
229 class InsertPointGuard {
230 IRBuilderBase &Builder;
231 AssertingVH<BasicBlock> Block;
232 BasicBlock::iterator Point;
233 DebugLoc DbgLoc;
234
235 public:
236 InsertPointGuard(IRBuilderBase &B)
237 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
238 DbgLoc(B.getCurrentDebugLocation()) {}
239
240 InsertPointGuard(const InsertPointGuard &) = delete;
241 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
242
243 ~InsertPointGuard() {
244 Builder.restoreIP(InsertPoint(Block, Point));
245 Builder.SetCurrentDebugLocation(DbgLoc);
246 }
247 };
248
249 // \brief RAII object that stores the current fast math settings and restores
250 // them when the object is destroyed.
251 class FastMathFlagGuard {
252 IRBuilderBase &Builder;
253 FastMathFlags FMF;
254 MDNode *FPMathTag;
255
256 public:
257 FastMathFlagGuard(IRBuilderBase &B)
258 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}
259
260 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
261 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
262
263 ~FastMathFlagGuard() {
264 Builder.FMF = FMF;
265 Builder.DefaultFPMathTag = FPMathTag;
266 }
267 };
268
269 //===--------------------------------------------------------------------===//
270 // Miscellaneous creation methods.
271 //===--------------------------------------------------------------------===//
272
273 /// \brief Make a new global variable with initializer type i8*
274 ///
275 /// Make a new global variable with an initializer that has array of i8 type
276 /// filled in with the null terminated string value specified. The new global
277 /// variable will be marked mergable with any others of the same contents. If
278 /// Name is specified, it is the name of the global variable created.
279 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
280 unsigned AddressSpace = 0);
281
282 /// \brief Get a constant value representing either true or false.
283 ConstantInt *getInt1(bool V) {
284 return ConstantInt::get(getInt1Ty(), V);
285 }
286
287 /// \brief Get the constant value for i1 true.
288 ConstantInt *getTrue() {
289 return ConstantInt::getTrue(Context);
290 }
291
292 /// \brief Get the constant value for i1 false.
293 ConstantInt *getFalse() {
294 return ConstantInt::getFalse(Context);
295 }
296
297 /// \brief Get a constant 8-bit value.
298 ConstantInt *getInt8(uint8_t C) {
299 return ConstantInt::get(getInt8Ty(), C);
300 }
301
302 /// \brief Get a constant 16-bit value.
303 ConstantInt *getInt16(uint16_t C) {
304 return ConstantInt::get(getInt16Ty(), C);
305 }
306
307 /// \brief Get a constant 32-bit value.
308 ConstantInt *getInt32(uint32_t C) {
309 return ConstantInt::get(getInt32Ty(), C);
310 }
311
312 /// \brief Get a constant 64-bit value.
313 ConstantInt *getInt64(uint64_t C) {
314 return ConstantInt::get(getInt64Ty(), C);
315 }
316
317 /// \brief Get a constant N-bit value, zero extended or truncated from
318 /// a 64-bit value.
319 ConstantInt *getIntN(unsigned N, uint64_t C) {
320 return ConstantInt::get(getIntNTy(N), C);
321 }
322
323 /// \brief Get a constant integer value.
324 ConstantInt *getInt(const APInt &AI) {
325 return ConstantInt::get(Context, AI);
326 }
327
328 //===--------------------------------------------------------------------===//
329 // Type creation methods
330 //===--------------------------------------------------------------------===//
331
332 /// \brief Fetch the type representing a single bit
333 IntegerType *getInt1Ty() {
334 return Type::getInt1Ty(Context);
335 }
336
337 /// \brief Fetch the type representing an 8-bit integer.
338 IntegerType *getInt8Ty() {
339 return Type::getInt8Ty(Context);
340 }
341
342 /// \brief Fetch the type representing a 16-bit integer.
343 IntegerType *getInt16Ty() {
344 return Type::getInt16Ty(Context);
345 }
346
347 /// \brief Fetch the type representing a 32-bit integer.
348 IntegerType *getInt32Ty() {
349 return Type::getInt32Ty(Context);
350 }
351
352 /// \brief Fetch the type representing a 64-bit integer.
353 IntegerType *getInt64Ty() {
354 return Type::getInt64Ty(Context);
355 }
356
357 /// \brief Fetch the type representing a 128-bit integer.
358 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
359
360 /// \brief Fetch the type representing an N-bit integer.
361 IntegerType *getIntNTy(unsigned N) {
362 return Type::getIntNTy(Context, N);
363 }
364
365 /// \brief Fetch the type representing a 16-bit floating point value.
366 Type *getHalfTy() {
367 return Type::getHalfTy(Context);
368 }
369
370 /// \brief Fetch the type representing a 32-bit floating point value.
371 Type *getFloatTy() {
372 return Type::getFloatTy(Context);
373 }
374
375 /// \brief Fetch the type representing a 64-bit floating point value.
376 Type *getDoubleTy() {
377 return Type::getDoubleTy(Context);
378 }
379
380 /// \brief Fetch the type representing void.
381 Type *getVoidTy() {
382 return Type::getVoidTy(Context);
383 }
384
385 /// \brief Fetch the type representing a pointer to an 8-bit integer value.
386 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
387 return Type::getInt8PtrTy(Context, AddrSpace);
388 }
389
390 /// \brief Fetch the type representing a pointer to an integer value.
391 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
392 return DL.getIntPtrType(Context, AddrSpace);
393 }
394
395 //===--------------------------------------------------------------------===//
396 // Intrinsic creation methods
397 //===--------------------------------------------------------------------===//
398
399 /// \brief Create and insert a memset to the specified pointer and the
400 /// specified value.
401 ///
402 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
403 /// specified, it will be added to the instruction. Likewise with alias.scope
404 /// and noalias tags.
405 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align,
406 bool isVolatile = false, MDNode *TBAATag = nullptr,
407 MDNode *ScopeTag = nullptr,
408 MDNode *NoAliasTag = nullptr) {
409 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
410 TBAATag, ScopeTag, NoAliasTag);
411 }
412
413 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
414 bool isVolatile = false, MDNode *TBAATag = nullptr,
415 MDNode *ScopeTag = nullptr,
416 MDNode *NoAliasTag = nullptr);
417
418 /// \brief Create and insert a memcpy between the specified pointers.
419 ///
420 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
421 /// specified, it will be added to the instruction. Likewise with alias.scope
422 /// and noalias tags.
423 CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
424 bool isVolatile = false, MDNode *TBAATag = nullptr,
425 MDNode *TBAAStructTag = nullptr,
426 MDNode *ScopeTag = nullptr,
427 MDNode *NoAliasTag = nullptr) {
428 return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag,
429 TBAAStructTag, ScopeTag, NoAliasTag);
430 }
431
432 CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
433 bool isVolatile = false, MDNode *TBAATag = nullptr,
434 MDNode *TBAAStructTag = nullptr,
435 MDNode *ScopeTag = nullptr,
436 MDNode *NoAliasTag = nullptr);
437
438 /// \brief Create and insert an element unordered-atomic memcpy between the
439 /// specified pointers.
440 ///
441 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
442 ///
443 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
444 /// specified, it will be added to the instruction. Likewise with alias.scope
445 /// and noalias tags.
446 CallInst *CreateElementUnorderedAtomicMemCpy(
447 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
448 uint64_t Size, uint32_t ElementSize, MDNode *TBAATag = nullptr,
449 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
450 MDNode *NoAliasTag = nullptr) {
451 return CreateElementUnorderedAtomicMemCpy(
452 Dst, DstAlign, Src, SrcAlign, getInt64(Size), ElementSize, TBAATag,
453 TBAAStructTag, ScopeTag, NoAliasTag);
454 }
455
456 CallInst *CreateElementUnorderedAtomicMemCpy(
457 Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
458 uint32_t ElementSize, MDNode *TBAATag = nullptr,
459 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
460 MDNode *NoAliasTag = nullptr);
461
462 /// \brief Create and insert a memmove between the specified
463 /// pointers.
464 ///
465 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
466 /// specified, it will be added to the instruction. Likewise with alias.scope
467 /// and noalias tags.
468 CallInst *CreateMemMove(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
469 bool isVolatile = false, MDNode *TBAATag = nullptr,
470 MDNode *ScopeTag = nullptr,
471 MDNode *NoAliasTag = nullptr) {
472 return CreateMemMove(Dst, Src, getInt64(Size), Align, isVolatile,
473 TBAATag, ScopeTag, NoAliasTag);
474 }
475
476 CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
477 bool isVolatile = false, MDNode *TBAATag = nullptr,
478 MDNode *ScopeTag = nullptr,
479 MDNode *NoAliasTag = nullptr);
480
481 /// \brief Create a vector fadd reduction intrinsic of the source vector.
482 /// The first parameter is a scalar accumulator value for ordered reductions.
483 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
484
485 /// \brief Create a vector fmul reduction intrinsic of the source vector.
486 /// The first parameter is a scalar accumulator value for ordered reductions.
487 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
488
489 /// \brief Create a vector int add reduction intrinsic of the source vector.
490 CallInst *CreateAddReduce(Value *Src);
491
492 /// \brief Create a vector int mul reduction intrinsic of the source vector.
493 CallInst *CreateMulReduce(Value *Src);
494
495 /// \brief Create a vector int AND reduction intrinsic of the source vector.
496 CallInst *CreateAndReduce(Value *Src);
497
498 /// \brief Create a vector int OR reduction intrinsic of the source vector.
499 CallInst *CreateOrReduce(Value *Src);
500
501 /// \brief Create a vector int XOR reduction intrinsic of the source vector.
502 CallInst *CreateXorReduce(Value *Src);
503
504 /// \brief Create a vector integer max reduction intrinsic of the source
505 /// vector.
506 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
507
508 /// \brief Create a vector integer min reduction intrinsic of the source
509 /// vector.
510 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
511
512 /// \brief Create a vector float max reduction intrinsic of the source
513 /// vector.
514 CallInst *CreateFPMaxReduce(Value *Src, bool NoNaN = false);
515
516 /// \brief Create a vector float min reduction intrinsic of the source
517 /// vector.
518 CallInst *CreateFPMinReduce(Value *Src, bool NoNaN = false);
519
520 /// \brief Create a lifetime.start intrinsic.
521 ///
522 /// If the pointer isn't i8* it will be converted.
523 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
524
525 /// \brief Create a lifetime.end intrinsic.
526 ///
527 /// If the pointer isn't i8* it will be converted.
528 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
529
530 /// Create a call to invariant.start intrinsic.
531 ///
532 /// If the pointer isn't i8* it will be converted.
533 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
534
535 /// \brief Create a call to Masked Load intrinsic
536 CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
537 Value *PassThru = nullptr, const Twine &Name = "");
538
539 /// \brief Create a call to Masked Store intrinsic
540 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
541 Value *Mask);
542
543 /// \brief Create a call to Masked Gather intrinsic
544 CallInst *CreateMaskedGather(Value *Ptrs, unsigned Align,
545 Value *Mask = nullptr,
546 Value *PassThru = nullptr,
547 const Twine& Name = "");
548
549 /// \brief Create a call to Masked Scatter intrinsic
550 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Align,
551 Value *Mask = nullptr);
552
553 /// \brief Create an assume intrinsic call that allows the optimizer to
554 /// assume that the provided condition will be true.
555 CallInst *CreateAssumption(Value *Cond);
556
557 /// \brief Create a call to the experimental.gc.statepoint intrinsic to
558 /// start a new statepoint sequence.
559 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
560 Value *ActualCallee,
561 ArrayRef<Value *> CallArgs,
562 ArrayRef<Value *> DeoptArgs,
563 ArrayRef<Value *> GCArgs,
564 const Twine &Name = "");
565
566 /// \brief Create a call to the experimental.gc.statepoint intrinsic to
567 /// start a new statepoint sequence.
568 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
569 Value *ActualCallee, uint32_t Flags,
570 ArrayRef<Use> CallArgs,
571 ArrayRef<Use> TransitionArgs,
572 ArrayRef<Use> DeoptArgs,
573 ArrayRef<Value *> GCArgs,
574 const Twine &Name = "");
575
576 // \brief Convenience function for the common case when CallArgs are filled
577 // in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
578 // .get()'ed to get the Value pointer.
579 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
580 Value *ActualCallee, ArrayRef<Use> CallArgs,
581 ArrayRef<Value *> DeoptArgs,
582 ArrayRef<Value *> GCArgs,
583 const Twine &Name = "");
584
585 /// \brief Create an invoke to the experimental.gc.statepoint intrinsic to
586 /// start a new statepoint sequence.
587 InvokeInst *
588 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
589 Value *ActualInvokee, BasicBlock *NormalDest,
590 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
591 ArrayRef<Value *> DeoptArgs,
592 ArrayRef<Value *> GCArgs, const Twine &Name = "");
593
594 /// brief Create an invoke to the experimental.gc.statepoint intrinsic to
595 /// start a new statepoint sequence.
596 InvokeInst *CreateGCStatepointInvoke(
597 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
598 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
599 ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
600 ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs,
601 const Twine &Name = "");
602
603 // Conveninence function for the common case when CallArgs are filled in using
604 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
605 // get the Value *.
606 InvokeInst *
607 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
608 Value *ActualInvokee, BasicBlock *NormalDest,
609 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
610 ArrayRef<Value *> DeoptArgs,
611 ArrayRef<Value *> GCArgs, const Twine &Name = "");
612
613 /// \brief Create a call to the experimental.gc.result intrinsic to extract
614 /// the result from a call wrapped in a statepoint.
615 CallInst *CreateGCResult(Instruction *Statepoint,
616 Type *ResultType,
617 const Twine &Name = "");
618
619 /// \brief Create a call to the experimental.gc.relocate intrinsics to
620 /// project the relocated value of one pointer from the statepoint.
621 CallInst *CreateGCRelocate(Instruction *Statepoint,
622 int BaseOffset,
623 int DerivedOffset,
624 Type *ResultType,
625 const Twine &Name = "");
626
627 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
628 /// first type.
629 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID,
630 Value *LHS, Value *RHS,
631 const Twine &Name = "");
632
633 /// \brief Create a call to the minnum intrinsic (forwards to
/// CreateBinaryIntrinsic, mangled on the type of \p LHS).
634 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
635 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, Name);
636 }
637
638 /// Create call to the maxnum intrinsic.
639 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
640 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, Name);
641 }
642
643private:
644 /// \brief Create a call to a masked intrinsic with given Id.
645 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
646 ArrayRef<Type *> OverloadedTypes,
647 const Twine &Name = "");
648
 /// NOTE(review): presumably casts \p Ptr to an i8* in its address space
 /// (helper for the masked/memory intrinsics above) — confirm at definition.
649 Value *getCastedInt8PtrValue(Value *Ptr);
650};
651
652/// \brief This provides a uniform API for creating instructions and inserting
653/// them into a basic block: either at the end of a BasicBlock, or at a specific
654/// iterator location in a block.
655///
656/// Note that the builder does not expose the full generality of LLVM
657/// instructions. For access to extra instruction properties, use the mutators
658/// (e.g. setVolatile) on the instructions after they have been
659/// created. Convenience state exists to specify fast-math flags and fp-math
660/// tags.
661///
662/// The first template argument specifies a class to use for creating constants.
663/// This defaults to creating minimally folded constants. The second template
664/// argument allows clients to specify custom insertion hooks that are called on
665/// every newly created instruction.
666template <typename T = ConstantFolder,
667 typename Inserter = IRBuilderDefaultInserter>
668class IRBuilder : public IRBuilderBase, public Inserter {
669 T Folder;
670
671public:
 // Constructors. All variants accept an optional default FP-math metadata
 // tag and operand bundles; those taking a BasicBlock, an Instruction, or a
 // (BasicBlock, iterator) pair also establish the initial insertion point.
 // Variants taking `const T &F` copy the supplied constant folder; the rest
 // default-construct one.
672 IRBuilder(LLVMContext &C, const T &F, Inserter I = Inserter(),
673 MDNode *FPMathTag = nullptr,
674 ArrayRef<OperandBundleDef> OpBundles = None)
675 : IRBuilderBase(C, FPMathTag, OpBundles), Inserter(std::move(I)),
676 Folder(F) {}
677
678 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
679 ArrayRef<OperandBundleDef> OpBundles = None)
680 : IRBuilderBase(C, FPMathTag, OpBundles), Folder() {}
681
682 explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr,
683 ArrayRef<OperandBundleDef> OpBundles = None)
684 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
685 SetInsertPoint(TheBB);
686 }
687
688 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
689 ArrayRef<OperandBundleDef> OpBundles = None)
690 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder() {
691 SetInsertPoint(TheBB);
692 }
693
694 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
695 ArrayRef<OperandBundleDef> OpBundles = None)
696 : IRBuilderBase(IP->getContext(), FPMathTag, OpBundles), Folder() {
697 SetInsertPoint(IP);
698 }
699
700 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T &F,
701 MDNode *FPMathTag = nullptr,
702 ArrayRef<OperandBundleDef> OpBundles = None)
703 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
704 SetInsertPoint(TheBB, IP);
705 }
706
707 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
708 MDNode *FPMathTag = nullptr,
709 ArrayRef<OperandBundleDef> OpBundles = None)
710 : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder() {
711 SetInsertPoint(TheBB, IP);
712 }
713
714 /// \brief Get the constant folder being used.
715 const T &getFolder() { return Folder; }
716
717 /// \brief Insert and return the specified instruction.
718 template<typename InstTy>
719 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
720 this->InsertHelper(I, Name, BB, InsertPt);
37
Calling 'IRBuilderDefaultInserter::InsertHelper'
40
Returning from 'IRBuilderDefaultInserter::InsertHelper'
84
Calling 'IRBuilderDefaultInserter::InsertHelper'
86
Returning from 'IRBuilderDefaultInserter::InsertHelper'
130
Calling 'IRBuilderDefaultInserter::InsertHelper'
132
Returning from 'IRBuilderDefaultInserter::InsertHelper'
721 this->SetInstDebugLocation(I);
41
Calling 'IRBuilderBase::SetInstDebugLocation'
52
Returning from 'IRBuilderBase::SetInstDebugLocation'
87
Calling 'IRBuilderBase::SetInstDebugLocation'
98
Returning from 'IRBuilderBase::SetInstDebugLocation'
133
Calling 'IRBuilderBase::SetInstDebugLocation'
144
Returning from 'IRBuilderBase::SetInstDebugLocation'
722 return I;
723 }
724
725 /// \brief No-op overload to handle constants.
 ///
 /// Constants are not instructions, so there is nothing to insert and no
 /// name or debug location to apply; \p C is returned unchanged.
726 Constant *Insert(Constant *C, const Twine& = "") const {
727 return C;
728 }
729
730 //===--------------------------------------------------------------------===//
731 // Instruction creation methods: Terminators
732 //===--------------------------------------------------------------------===//
733
734private:
735 /// \brief Helper to add branch weight and unpredictable metadata onto an
736 /// instruction.
 /// Either argument may be null, in which case that metadata kind is left
 /// untouched.
737 /// \returns The annotated instruction.
738 template <typename InstTy>
739 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
740 if (Weights)
741 I->setMetadata(LLVMContext::MD_prof, Weights);
742 if (Unpredictable)
743 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
744 return I;
745 }
746
747public:
748 /// \brief Create a 'ret void' instruction.
749 ReturnInst *CreateRetVoid() {
750 return Insert(ReturnInst::Create(Context));
751 }
752
753 /// \brief Create a 'ret <val>' instruction.
754 ReturnInst *CreateRet(Value *V) {
755 return Insert(ReturnInst::Create(Context, V));
756 }
757
758 /// \brief Create a sequence of N insertvalue instructions,
759 /// with one Value from the retVals array each, that build an aggregate
760 /// return value one value at a time, and a ret instruction to return
761 /// the resulting aggregate value.
762 ///
763 /// This is a convenience function for code that uses aggregate return values
764 /// as a vehicle for having multiple return values.
765 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
766 Value *V = UndefValue::get(getCurrentFunctionReturnType());
767 for (unsigned i = 0; i != N; ++i)
768 V = CreateInsertValue(V, retVals[i], i, "mrv");
769 return Insert(ReturnInst::Create(Context, V));
770 }
771
772 /// \brief Create an unconditional 'br label X' instruction.
773 BranchInst *CreateBr(BasicBlock *Dest) {
774 return Insert(BranchInst::Create(Dest));
775 }
776
777 /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
778 /// instruction.
779 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
780 MDNode *BranchWeights = nullptr,
781 MDNode *Unpredictable = nullptr) {
782 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
783 BranchWeights, Unpredictable));
784 }
785
786 /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
787 /// instruction. Copy branch meta data if available.
 /// Copies prof, unpredictable, make.implicit, and dbg metadata from
 /// \p MDSrc (if non-null) onto the new branch.
788 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
789 Instruction *MDSrc) {
790 BranchInst *Br = BranchInst::Create(True, False, Cond);
791 if (MDSrc) {
792 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
793 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
794 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
795 }
796 return Insert(Br);
797 }
798
799 /// \brief Create a switch instruction with the specified value, default dest,
800 /// and with a hint for the number of cases that will be added (for efficient
801 /// allocation).
802 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
803 MDNode *BranchWeights = nullptr,
804 MDNode *Unpredictable = nullptr) {
805 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
806 BranchWeights, Unpredictable));
807 }
808
809 /// \brief Create an indirect branch instruction with the specified address
810 /// operand, with an optional hint for the number of destinations that will be
811 /// added (for efficient allocation).
812 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
813 return Insert(IndirectBrInst::Create(Addr, NumDests));
814 }
815
816 /// \brief Create an invoke instruction.
817 InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
818 BasicBlock *UnwindDest,
819 ArrayRef<Value *> Args = None,
820 const Twine &Name = "") {
821 return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
822 Name);
823 }
 /// \brief Create an invoke instruction carrying operand bundles.
824 InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
825 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
826 ArrayRef<OperandBundleDef> OpBundles,
827 const Twine &Name = "") {
828 return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args,
829 OpBundles), Name);
830 }
831
 /// \brief Create a 'resume Exn' instruction to continue unwinding.
832 ResumeInst *CreateResume(Value *Exn) {
833 return Insert(ResumeInst::Create(Exn));
834 }
835
 /// \brief Create a cleanupret from \p CleanupPad, unwinding to \p UnwindBB
 /// (or to the caller when null).
836 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
837 BasicBlock *UnwindBB = nullptr) {
838 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
839 }
840
 /// \brief Create a catchswitch within \p ParentPad, with a hint for the
 /// number of handlers that will be added.
841 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
842 unsigned NumHandlers,
843 const Twine &Name = "") {
844 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
845 Name);
846 }
847
 /// \brief Create a catchpad within \p ParentPad with the given arguments.
848 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
849 const Twine &Name = "") {
850 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
851 }
852
 /// \brief Create a cleanuppad within \p ParentPad with the given arguments.
853 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
854 ArrayRef<Value *> Args = None,
855 const Twine &Name = "") {
856 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
857 }
858
 /// \brief Create a catchret from \p CatchPad to successor \p BB.
859 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
860 return Insert(CatchReturnInst::Create(CatchPad, BB));
861 }
862
 /// \brief Create an 'unreachable' instruction.
863 UnreachableInst *CreateUnreachable() {
864 return Insert(new UnreachableInst(Context));
865 }
866
867 //===--------------------------------------------------------------------===//
868 // Instruction creation methods: Binary Operators
869 //===--------------------------------------------------------------------===//
870private:
 /// \brief Insert a binary operator \p Opc and apply the requested
 /// no-unsigned-wrap / no-signed-wrap flags to it.
871 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
872 Value *LHS, Value *RHS,
873 const Twine &Name,
874 bool HasNUW, bool HasNSW) {
875 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
876 if (HasNUW) BO->setHasNoUnsignedWrap();
877 if (HasNSW) BO->setHasNoSignedWrap();
878 return BO;
879 }
880
 /// \brief Attach fpmath metadata (falling back to the builder's default
 /// tag when \p FPMathTag is null) and the given fast-math flags to \p I.
881 Instruction *AddFPMathAttributes(Instruction *I,
882 MDNode *FPMathTag,
883 FastMathFlags FMF) const {
884 if (!FPMathTag)
885 FPMathTag = DefaultFPMathTag;
886 if (FPMathTag)
887 I->setMetadata(LLVMContext::MD_fpmath, FPMathTag);
888 I->setFastMathFlags(FMF);
889 return I;
890 }
891
892public:
 // Each Create* below constant-folds through the Folder when all Value
 // operands are Constants; otherwise it inserts the corresponding
 // BinaryOperator at the current insertion point. Floating-point variants
 // additionally attach fpmath metadata and the builder's fast-math flags.
893 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
894 bool HasNUW = false, bool HasNSW = false) {
895 if (Constant *LC = dyn_cast<Constant>(LHS))
896 if (Constant *RC = dyn_cast<Constant>(RHS))
897 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
898 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
899 HasNUW, HasNSW);
900 }
901 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
902 return CreateAdd(LHS, RHS, Name, false, true);
903 }
904 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
905 return CreateAdd(LHS, RHS, Name, true, false);
906 }
907 Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "",
908 MDNode *FPMathTag = nullptr) {
909 if (Constant *LC = dyn_cast<Constant>(LHS))
910 if (Constant *RC = dyn_cast<Constant>(RHS))
911 return Insert(Folder.CreateFAdd(LC, RC), Name);
912 return Insert(AddFPMathAttributes(BinaryOperator::CreateFAdd(LHS, RHS),
913 FPMathTag, FMF), Name);
914 }
915 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
916 bool HasNUW = false, bool HasNSW = false) {
917 if (Constant *LC = dyn_cast<Constant>(LHS))
918 if (Constant *RC = dyn_cast<Constant>(RHS))
919 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
920 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
921 HasNUW, HasNSW);
922 }
923 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
924 return CreateSub(LHS, RHS, Name, false, true);
925 }
926 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
927 return CreateSub(LHS, RHS, Name, true, false);
928 }
929 Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "",
930 MDNode *FPMathTag = nullptr) {
931 if (Constant *LC = dyn_cast<Constant>(LHS))
932 if (Constant *RC = dyn_cast<Constant>(RHS))
933 return Insert(Folder.CreateFSub(LC, RC), Name);
934 return Insert(AddFPMathAttributes(BinaryOperator::CreateFSub(LHS, RHS),
935 FPMathTag, FMF), Name);
936 }
937 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
938 bool HasNUW = false, bool HasNSW = false) {
939 if (Constant *LC = dyn_cast<Constant>(LHS))
940 if (Constant *RC = dyn_cast<Constant>(RHS))
941 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
942 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
943 HasNUW, HasNSW);
944 }
945 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
946 return CreateMul(LHS, RHS, Name, false, true);
947 }
948 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
949 return CreateMul(LHS, RHS, Name, true, false);
950 }
951 Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "",
952 MDNode *FPMathTag = nullptr) {
953 if (Constant *LC = dyn_cast<Constant>(LHS))
954 if (Constant *RC = dyn_cast<Constant>(RHS))
955 return Insert(Folder.CreateFMul(LC, RC), Name);
956 return Insert(AddFPMathAttributes(BinaryOperator::CreateFMul(LHS, RHS),
957 FPMathTag, FMF), Name);
958 }
959 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
960 bool isExact = false) {
961 if (Constant *LC = dyn_cast<Constant>(LHS))
962 if (Constant *RC = dyn_cast<Constant>(RHS))
963 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
964 if (!isExact)
965 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
966 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
967 }
968 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
969 return CreateUDiv(LHS, RHS, Name, true);
970 }
971 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
972 bool isExact = false) {
973 if (Constant *LC = dyn_cast<Constant>(LHS))
974 if (Constant *RC = dyn_cast<Constant>(RHS))
975 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
976 if (!isExact)
977 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
978 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
979 }
980 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
981 return CreateSDiv(LHS, RHS, Name, true);
982 }
983 Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "",
984 MDNode *FPMathTag = nullptr) {
985 if (Constant *LC = dyn_cast<Constant>(LHS))
986 if (Constant *RC = dyn_cast<Constant>(RHS))
987 return Insert(Folder.CreateFDiv(LC, RC), Name);
988 return Insert(AddFPMathAttributes(BinaryOperator::CreateFDiv(LHS, RHS),
989 FPMathTag, FMF), Name);
990 }
991 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
992 if (Constant *LC = dyn_cast<Constant>(LHS))
993 if (Constant *RC = dyn_cast<Constant>(RHS))
994 return Insert(Folder.CreateURem(LC, RC), Name);
995 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
996 }
997 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
998 if (Constant *LC = dyn_cast<Constant>(LHS))
999 if (Constant *RC = dyn_cast<Constant>(RHS))
1000 return Insert(Folder.CreateSRem(LC, RC), Name);
1001 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1002 }
1003 Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "",
1004 MDNode *FPMathTag = nullptr) {
1005 if (Constant *LC = dyn_cast<Constant>(LHS))
1006 if (Constant *RC = dyn_cast<Constant>(RHS))
1007 return Insert(Folder.CreateFRem(LC, RC), Name);
1008 return Insert(AddFPMathAttributes(BinaryOperator::CreateFRem(LHS, RHS),
1009 FPMathTag, FMF), Name);
1010 }
1011
1012 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1013 bool HasNUW = false, bool HasNSW = false) {
1014 if (Constant *LC = dyn_cast<Constant>(LHS))
1015 if (Constant *RC = dyn_cast<Constant>(RHS))
1016 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1017 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1018 HasNUW, HasNSW);
1019 }
1020 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1021 bool HasNUW = false, bool HasNSW = false) {
1022 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1023 HasNUW, HasNSW);
1024 }
1025 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1026 bool HasNUW = false, bool HasNSW = false) {
1027 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1028 HasNUW, HasNSW);
1029 }
1030
1031 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1032 bool isExact = false) {
1033 if (Constant *LC = dyn_cast<Constant>(LHS))
1034 if (Constant *RC = dyn_cast<Constant>(RHS))
1035 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1036 if (!isExact)
1037 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1038 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1039 }
1040 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1041 bool isExact = false) {
1042 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1043 }
1044 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1045 bool isExact = false) {
1046 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1047 }
1048
1049 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1050 bool isExact = false) {
1051 if (Constant *LC = dyn_cast<Constant>(LHS))
1052 if (Constant *RC = dyn_cast<Constant>(RHS))
1053 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1054 if (!isExact)
1055 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1056 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1057 }
1058 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1059 bool isExact = false) {
1060 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1061 }
1062 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1063 bool isExact = false) {
1064 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1065 }
1066
1067 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1068 if (Constant *RC = dyn_cast<Constant>(RHS)) {
1069 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1070 return LHS; // LHS & -1 -> LHS
1071 if (Constant *LC = dyn_cast<Constant>(LHS))
1072 return Insert(Folder.CreateAnd(LC, RC), Name);
1073 }
1074 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1075 }
1076 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1077 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1078 }
1079 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1080 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1081 }
1082
1083 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1084 if (Constant *RC = dyn_cast<Constant>(RHS)) {
1085 if (RC->isNullValue())
1086 return LHS; // LHS | 0 -> LHS
1087 if (Constant *LC = dyn_cast<Constant>(LHS))
1088 return Insert(Folder.CreateOr(LC, RC), Name);
1089 }
1090 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1091 }
1092 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1093 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1094 }
1095 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1096 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1097 }
1098
1099 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1100 if (Constant *LC = dyn_cast<Constant>(LHS))
1101 if (Constant *RC = dyn_cast<Constant>(RHS))
1102 return Insert(Folder.CreateXor(LC, RC), Name);
1103 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1104 }
1105 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1106 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1107 }
1108 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1109 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1110 }
1111
 /// \brief Create an arbitrary binary operator \p Opc; FP operators also get
 /// fpmath metadata and the builder's fast-math flags.
1112 Value *CreateBinOp(Instruction::BinaryOps Opc,
1113 Value *LHS, Value *RHS, const Twine &Name = "",
1114 MDNode *FPMathTag = nullptr) {
1115 if (Constant *LC = dyn_cast<Constant>(LHS))
1116 if (Constant *RC = dyn_cast<Constant>(RHS))
1117 return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
1118 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1119 if (isa<FPMathOperator>(BinOp))
1120 BinOp = AddFPMathAttributes(BinOp, FPMathTag, FMF);
1121 return Insert(BinOp, Name);
1122 }
1123
1124 Value *CreateNeg(Value *V, const Twine &Name = "",
1125 bool HasNUW = false, bool HasNSW = false) {
1126 if (Constant *VC = dyn_cast<Constant>(V))
1127 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1128 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1129 if (HasNUW) BO->setHasNoUnsignedWrap();
1130 if (HasNSW) BO->setHasNoSignedWrap();
1131 return BO;
1132 }
1133 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1134 return CreateNeg(V, Name, false, true);
1135 }
1136 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1137 return CreateNeg(V, Name, true, false);
1138 }
1139 Value *CreateFNeg(Value *V, const Twine &Name = "",
1140 MDNode *FPMathTag = nullptr) {
1141 if (Constant *VC = dyn_cast<Constant>(V))
1142 return Insert(Folder.CreateFNeg(VC), Name);
1143 return Insert(AddFPMathAttributes(BinaryOperator::CreateFNeg(V),
1144 FPMathTag, FMF), Name);
1145 }
1146 Value *CreateNot(Value *V, const Twine &Name = "") {
1147 if (Constant *VC = dyn_cast<Constant>(V))
1148 return Insert(Folder.CreateNot(VC), Name);
1149 return Insert(BinaryOperator::CreateNot(V), Name);
1150 }
1151
1152 //===--------------------------------------------------------------------===//
1153 // Instruction creation methods: Memory Instructions
1154 //===--------------------------------------------------------------------===//
1155
 /// \brief Create an alloca of \p Ty in the given address space.
1156 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1157 Value *ArraySize = nullptr, const Twine &Name = "") {
1158 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize), Name);
1159 }
1160
 /// \brief Create an alloca in the module's default alloca address space
 /// (taken from the enclosing module's DataLayout).
1161 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1162 const Twine &Name = "") {
1163 const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
1164 return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name);
1165 }
1166 // \brief Provided to resolve 'CreateLoad(Ptr, "...")' correctly, instead of
1167 // converting the string to 'bool' for the isVolatile parameter.
1168 LoadInst *CreateLoad(Value *Ptr, const char *Name) {
1169 return Insert(new LoadInst(Ptr), Name);
1170 }
1171 LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
1172 return Insert(new LoadInst(Ptr), Name);
1173 }
1174 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1175 return Insert(new LoadInst(Ty, Ptr), Name);
1176 }
1177 LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
1178 return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
1179 }
 /// \brief Create a store of \p Val to \p Ptr.
1180 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1181 return Insert(new StoreInst(Val, Ptr, isVolatile));
1182 }
1183 // \brief Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
1184 // correctly, instead of converting the string to 'bool' for the isVolatile
1185 // parameter.
1186 LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
1187 LoadInst *LI = CreateLoad(Ptr, Name);
1188 LI->setAlignment(Align);
1189 return LI;
1190 }
1191 LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
1192 const Twine &Name = "") {
1193 LoadInst *LI = CreateLoad(Ptr, Name);
1194 LI->setAlignment(Align);
1195 return LI;
1196 }
1197 LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
1198 const Twine &Name = "") {
1199 LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
1200 LI->setAlignment(Align);
1201 return LI;
1202 }
1203 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
1204 bool isVolatile = false) {
1205 StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
1206 SI->setAlignment(Align);
1207 return SI;
1208 }
 /// \brief Create a fence with the given ordering and sync scope.
1209 FenceInst *CreateFence(AtomicOrdering Ordering,
1210 SyncScope::ID SSID = SyncScope::System,
1211 const Twine &Name = "") {
1212 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1213 }
1214 AtomicCmpXchgInst *
1215 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
1216 AtomicOrdering SuccessOrdering,
1217 AtomicOrdering FailureOrdering,
1218 SyncScope::ID SSID = SyncScope::System) {
1219 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
1220 FailureOrdering, SSID));
1221 }
1222 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
1223 AtomicOrdering Ordering,
1224 SyncScope::ID SSID = SyncScope::System) {
1225 return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
1226 }
 // GEP creators below constant-fold through the Folder when the pointer and
 // every index are Constants; otherwise they insert a GetElementPtrInst.
1227 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1228 const Twine &Name = "") {
1229 return CreateGEP(nullptr, Ptr, IdxList, Name);
1230 }
1231 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1232 const Twine &Name = "") {
1233 if (Constant *PC = dyn_cast<Constant>(Ptr)) {
1234 // Every index must be constant.
1235 size_t i, e;
1236 for (i = 0, e = IdxList.size(); i != e; ++i)
1237 if (!isa<Constant>(IdxList[i]))
1238 break;
1239 if (i == e)
1240 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1241 }
1242 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1243 }
1244 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1245 const Twine &Name = "") {
1246 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1247 }
1248 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1249 const Twine &Name = "") {
1250 if (Constant *PC = dyn_cast<Constant>(Ptr)) {
1251 // Every index must be constant.
1252 size_t i, e;
1253 for (i = 0, e = IdxList.size(); i != e; ++i)
1254 if (!isa<Constant>(IdxList[i]))
1255 break;
1256 if (i == e)
1257 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1258 Name);
1259 }
1260 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1261 }
1262 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1263 return CreateGEP(nullptr, Ptr, Idx, Name);
1264 }
1265 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1266 if (Constant *PC = dyn_cast<Constant>(Ptr))
1267 if (Constant *IC = dyn_cast<Constant>(Idx))
1268 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1269 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1270 }
1271 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1272 const Twine &Name = "") {
1273 if (Constant *PC = dyn_cast<Constant>(Ptr))
1274 if (Constant *IC = dyn_cast<Constant>(Idx))
1275 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1276 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1277 }
1278 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1279 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1280 }
1281 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1282 const Twine &Name = "") {
1283 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1284
1285 if (Constant *PC = dyn_cast<Constant>(Ptr))
1286 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1287
1288 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1289 }
1290 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1291 const Twine &Name = "") {
1292 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1293
1294 if (Constant *PC = dyn_cast<Constant>(Ptr))
1295 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1296
1297 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1298 }
1299 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1300 const Twine &Name = "") {
1301 Value *Idxs[] = {
1302 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1303 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1304 };
1305
1306 if (Constant *PC = dyn_cast<Constant>(Ptr))
1307 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1308
1309 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1310 }
1311 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1312 unsigned Idx1, const Twine &Name = "") {
1313 Value *Idxs[] = {
1314 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1315 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1316 };
1317
1318 if (Constant *PC = dyn_cast<Constant>(Ptr))
1319 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1320
1321 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1322 }
1323 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1324 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1325
1326 if (Constant *PC = dyn_cast<Constant>(Ptr))
1327 return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idx), Name);
1328
1329 return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idx), Name);
1330 }
1331 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1332 const Twine &Name = "") {
1333 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1334
1335 if (Constant *PC = dyn_cast<Constant>(Ptr))
1336 return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idx), Name);
1337
1338 return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
1339 }
1340 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1341 const Twine &Name = "") {
1342 Value *Idxs[] = {
1343 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1344 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1345 };
1346
1347 if (Constant *PC = dyn_cast<Constant>(Ptr))
1348 return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idxs), Name);
1349
1350 return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idxs), Name);
1351 }
1352 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1353 const Twine &Name = "") {
1354 Value *Idxs[] = {
1355 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1356 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1357 };
1358
1359 if (Constant *PC = dyn_cast<Constant>(Ptr))
1360 return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idxs),
1361 Name);
1362
1363 return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idxs), Name);
1364 }
 /// \brief Create an in-bounds GEP addressing field \p Idx of struct \p Ty.
1365 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1366 const Twine &Name = "") {
1367 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1368 }
1369
1370 /// \brief Same as CreateGlobalString, but return a pointer with "i8*" type
1371 /// instead of a pointer to array of i8.
1372 Value *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
1373 unsigned AddressSpace = 0) {
1374 GlobalVariable *gv = CreateGlobalString(Str, Name, AddressSpace);
1375 Value *zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1376 Value *Args[] = { zero, zero };
1377 return CreateInBoundsGEP(gv->getValueType(), gv, Args, Name);
1378 }
1379
1380 //===--------------------------------------------------------------------===//
1381 // Instruction creation methods: Cast/Conversion Operators
1382 //===--------------------------------------------------------------------===//
1383
1384 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
1385 return CreateCast(Instruction::Trunc, V, DestTy, Name);
1386 }
1387 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
1388 return CreateCast(Instruction::ZExt, V, DestTy, Name);
1389 }
1390 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
1391 return CreateCast(Instruction::SExt, V, DestTy, Name);
1392 }
1393 /// \brief Create a ZExt or Trunc from the integer value V to DestTy. Return
1394 /// the value untouched if the type of V is already DestTy.
1395 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
1396 const Twine &Name = "") {
1397 assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1399, __extension__ __PRETTY_FUNCTION__))
1398 DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1399, __extension__ __PRETTY_FUNCTION__))
1399 "Can only zero extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1399, __extension__ __PRETTY_FUNCTION__))
;
1400 Type *VTy = V->getType();
1401 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1402 return CreateZExt(V, DestTy, Name);
1403 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1404 return CreateTrunc(V, DestTy, Name);
1405 return V;
1406 }
1407 /// \brief Create a SExt or Trunc from the integer value V to DestTy. Return
1408 /// the value untouched if the type of V is already DestTy.
1409 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
1410 const Twine &Name = "") {
1411 assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1413, __extension__ __PRETTY_FUNCTION__))
1412 DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1413, __extension__ __PRETTY_FUNCTION__))
1413 "Can only sign extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1413, __extension__ __PRETTY_FUNCTION__))
;
1414 Type *VTy = V->getType();
1415 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1416 return CreateSExt(V, DestTy, Name);
1417 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1418 return CreateTrunc(V, DestTy, Name);
1419 return V;
1420 }
1421 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
1422 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
1423 }
1424 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){
1425 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
1426 }
1427 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
1428 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
1429 }
1430 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
1431 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
1432 }
1433 Value *CreateFPTrunc(Value *V, Type *DestTy,
1434 const Twine &Name = "") {
1435 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
1436 }
1437 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
1438 return CreateCast(Instruction::FPExt, V, DestTy, Name);
1439 }
1440 Value *CreatePtrToInt(Value *V, Type *DestTy,
1441 const Twine &Name = "") {
1442 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
1443 }
1444 Value *CreateIntToPtr(Value *V, Type *DestTy,
1445 const Twine &Name = "") {
1446 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
1447 }
1448 Value *CreateBitCast(Value *V, Type *DestTy,
1449 const Twine &Name = "") {
1450 return CreateCast(Instruction::BitCast, V, DestTy, Name);
1451 }
1452 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
1453 const Twine &Name = "") {
1454 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
1455 }
1456 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
1457 const Twine &Name = "") {
1458 if (V->getType() == DestTy)
1459 return V;
1460 if (Constant *VC = dyn_cast<Constant>(V))
1461 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
1462 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
1463 }
1464 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
1465 const Twine &Name = "") {
1466 if (V->getType() == DestTy)
1467 return V;
1468 if (Constant *VC = dyn_cast<Constant>(V))
1469 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
1470 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
1471 }
1472 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
1473 const Twine &Name = "") {
1474 if (V->getType() == DestTy)
1475 return V;
1476 if (Constant *VC = dyn_cast<Constant>(V))
1477 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
1478 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
1479 }
1480 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
1481 const Twine &Name = "") {
1482 if (V->getType() == DestTy)
1483 return V;
1484 if (Constant *VC = dyn_cast<Constant>(V))
1485 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
1486 return Insert(CastInst::Create(Op, V, DestTy), Name);
1487 }
1488 Value *CreatePointerCast(Value *V, Type *DestTy,
1489 const Twine &Name = "") {
1490 if (V->getType() == DestTy)
1491 return V;
1492 if (Constant *VC = dyn_cast<Constant>(V))
1493 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
1494 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
1495 }
1496
1497 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
1498 const Twine &Name = "") {
1499 if (V->getType() == DestTy)
1500 return V;
1501
1502 if (Constant *VC = dyn_cast<Constant>(V)) {
1503 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
1504 Name);
1505 }
1506
1507 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
1508 Name);
1509 }
1510
1511 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
1512 const Twine &Name = "") {
1513 if (V->getType() == DestTy)
1514 return V;
1515 if (Constant *VC = dyn_cast<Constant>(V))
1516 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
1517 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
1518 }
1519
1520 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
1521 const Twine &Name = "") {
1522 if (V->getType() == DestTy)
1523 return V;
1524 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
1525 return CreatePtrToInt(V, DestTy, Name);
1526 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
1527 return CreateIntToPtr(V, DestTy, Name);
1528
1529 return CreateBitCast(V, DestTy, Name);
1530 }
1531
1532public:
1533 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
1534 if (V->getType() == DestTy)
1535 return V;
1536 if (Constant *VC = dyn_cast<Constant>(V))
1537 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
1538 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
1539 }
1540
1541 // \brief Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
1542 // compile time error, instead of converting the string to bool for the
1543 // isSigned parameter.
1544 Value *CreateIntCast(Value *, Type *, const char *) = delete;
1545
1546 //===--------------------------------------------------------------------===//
1547 // Instruction creation methods: Compare Instructions
1548 //===--------------------------------------------------------------------===//
1549
1550 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
1551 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
1552 }
1553 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
1554 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
1555 }
1556 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
1557 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
1558 }
1559 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
1560 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
1561 }
1562 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
1563 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
1564 }
1565 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
1566 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
1567 }
1568 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
1569 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
1570 }
1571 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
1572 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
1573 }
1574 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
1575 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
1576 }
1577 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
1578 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
1579 }
1580
1581 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
1582 MDNode *FPMathTag = nullptr) {
1583 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
1584 }
1585 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
1586 MDNode *FPMathTag = nullptr) {
1587 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
1588 }
1589 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
1590 MDNode *FPMathTag = nullptr) {
1591 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
1592 }
1593 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
1594 MDNode *FPMathTag = nullptr) {
1595 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
1596 }
1597 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
1598 MDNode *FPMathTag = nullptr) {
1599 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
1600 }
1601 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
1602 MDNode *FPMathTag = nullptr) {
1603 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
1604 }
1605 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
1606 MDNode *FPMathTag = nullptr) {
1607 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
1608 }
1609 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
1610 MDNode *FPMathTag = nullptr) {
1611 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
1612 }
1613 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
1614 MDNode *FPMathTag = nullptr) {
1615 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
1616 }
1617 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
1618 MDNode *FPMathTag = nullptr) {
1619 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
1620 }
1621 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
1622 MDNode *FPMathTag = nullptr) {
1623 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
1624 }
1625 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
1626 MDNode *FPMathTag = nullptr) {
1627 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
1628 }
1629 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
1630 MDNode *FPMathTag = nullptr) {
1631 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
1632 }
1633 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
1634 MDNode *FPMathTag = nullptr) {
1635 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
1636 }
1637
1638 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
1639 const Twine &Name = "") {
1640 if (Constant *LC = dyn_cast<Constant>(LHS))
1641 if (Constant *RC = dyn_cast<Constant>(RHS))
1642 return Insert(Folder.CreateICmp(P, LC, RC), Name);
1643 return Insert(new ICmpInst(P, LHS, RHS), Name);
1644 }
1645 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
1646 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
1647 if (Constant *LC = dyn_cast<Constant>(LHS))
1648 if (Constant *RC = dyn_cast<Constant>(RHS))
1649 return Insert(Folder.CreateFCmp(P, LC, RC), Name);
1650 return Insert(AddFPMathAttributes(new FCmpInst(P, LHS, RHS),
1651 FPMathTag, FMF), Name);
1652 }
1653
1654 //===--------------------------------------------------------------------===//
1655 // Instruction creation methods: Other Instructions
1656 //===--------------------------------------------------------------------===//
1657
1658 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
1659 const Twine &Name = "") {
1660 return Insert(PHINode::Create(Ty, NumReservedValues), Name);
1661 }
1662
1663 CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
1664 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
1665 PointerType *PTy = cast<PointerType>(Callee->getType());
1666 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
1667 return CreateCall(FTy, Callee, Args, Name, FPMathTag);
1668 }
1669
1670 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
1671 ArrayRef<Value *> Args, const Twine &Name = "",
1672 MDNode *FPMathTag = nullptr) {
1673 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
1674 if (isa<FPMathOperator>(CI))
1675 CI = cast<CallInst>(AddFPMathAttributes(CI, FPMathTag, FMF));
1676 return Insert(CI, Name);
1677 }
1678
1679 CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
1680 ArrayRef<OperandBundleDef> OpBundles,
1681 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
1682 CallInst *CI = CallInst::Create(Callee, Args, OpBundles);
1683 if (isa<FPMathOperator>(CI))
1684 CI = cast<CallInst>(AddFPMathAttributes(CI, FPMathTag, FMF));
1685 return Insert(CI, Name);
1686 }
1687
1688 CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
1689 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
1690 return CreateCall(Callee->getFunctionType(), Callee, Args, Name, FPMathTag);
1691 }
1692
1693 Value *CreateSelect(Value *C, Value *True, Value *False,
1694 const Twine &Name = "", Instruction *MDFrom = nullptr) {
1695 if (Constant *CC = dyn_cast<Constant>(C))
1696 if (Constant *TC = dyn_cast<Constant>(True))
1697 if (Constant *FC = dyn_cast<Constant>(False))
1698 return Insert(Folder.CreateSelect(CC, TC, FC), Name);
1699
1700 SelectInst *Sel = SelectInst::Create(C, True, False);
1701 if (MDFrom) {
1702 MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
1703 MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
1704 Sel = addBranchMetadata(Sel, Prof, Unpred);
1705 }
1706 return Insert(Sel, Name);
1707 }
1708
1709 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
1710 return Insert(new VAArgInst(List, Ty), Name);
1711 }
1712
1713 Value *CreateExtractElement(Value *Vec, Value *Idx,
1714 const Twine &Name = "") {
1715 if (Constant *VC = dyn_cast<Constant>(Vec))
1716 if (Constant *IC = dyn_cast<Constant>(Idx))
1717 return Insert(Folder.CreateExtractElement(VC, IC), Name);
1718 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
1719 }
1720
1721 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
1722 const Twine &Name = "") {
1723 return CreateExtractElement(Vec, getInt64(Idx), Name);
1724 }
1725
1726 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
1727 const Twine &Name = "") {
1728 if (Constant *VC = dyn_cast<Constant>(Vec))
1729 if (Constant *NC = dyn_cast<Constant>(NewElt))
1730 if (Constant *IC = dyn_cast<Constant>(Idx))
1731 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
1732 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
1733 }
1734
1735 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
1736 const Twine &Name = "") {
1737 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
1738 }
1739
1740 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
1741 const Twine &Name = "") {
1742 if (Constant *V1C = dyn_cast<Constant>(V1))
14
Calling 'dyn_cast'
34
Returning from 'dyn_cast'
35
Taking false branch
61
Calling 'dyn_cast'
81
Returning from 'dyn_cast'
82
Taking false branch
107
Calling 'dyn_cast'
127
Returning from 'dyn_cast'
128
Taking false branch
1743 if (Constant *V2C = dyn_cast<Constant>(V2))
1744 if (Constant *MC = dyn_cast<Constant>(Mask))
1745 return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name);
1746 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
36
Calling 'IRBuilder::Insert'
53
Returning from 'IRBuilder::Insert'
83
Calling 'IRBuilder::Insert'
99
Returning from 'IRBuilder::Insert'
129
Calling 'IRBuilder::Insert'
145
Returning from 'IRBuilder::Insert'
1747 }
1748
1749 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<uint32_t> IntMask,
1750 const Twine &Name = "") {
1751 Value *Mask = ConstantDataVector::get(Context, IntMask);
1752 return CreateShuffleVector(V1, V2, Mask, Name);
13
Calling 'IRBuilder::CreateShuffleVector'
54
Returning from 'IRBuilder::CreateShuffleVector'
60
Calling 'IRBuilder::CreateShuffleVector'
100
Returning from 'IRBuilder::CreateShuffleVector'
106
Calling 'IRBuilder::CreateShuffleVector'
146
Returning from 'IRBuilder::CreateShuffleVector'
1753 }
1754
1755 Value *CreateExtractValue(Value *Agg,
1756 ArrayRef<unsigned> Idxs,
1757 const Twine &Name = "") {
1758 if (Constant *AggC = dyn_cast<Constant>(Agg))
1759 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
1760 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
1761 }
1762
1763 Value *CreateInsertValue(Value *Agg, Value *Val,
1764 ArrayRef<unsigned> Idxs,
1765 const Twine &Name = "") {
1766 if (Constant *AggC = dyn_cast<Constant>(Agg))
1767 if (Constant *ValC = dyn_cast<Constant>(Val))
1768 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
1769 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
1770 }
1771
1772 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
1773 const Twine &Name = "") {
1774 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
1775 }
1776
1777 //===--------------------------------------------------------------------===//
1778 // Utility creation methods
1779 //===--------------------------------------------------------------------===//
1780
1781 /// \brief Return an i1 value testing if \p Arg is null.
1782 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
1783 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
1784 Name);
1785 }
1786
1787 /// \brief Return an i1 value testing if \p Arg is not null.
1788 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
1789 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
1790 Name);
1791 }
1792
1793 /// \brief Return the i64 difference between two pointer values, dividing out
1794 /// the size of the pointed-to objects.
1795 ///
1796 /// This is intended to implement C-style pointer subtraction. As such, the
1797 /// pointers must be appropriately aligned for their element types and
1798 /// pointing into the same object.
1799 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "") {
1800 assert(LHS->getType() == RHS->getType() &&(static_cast <bool> (LHS->getType() == RHS->getType
() && "Pointer subtraction operand types must match!"
) ? void (0) : __assert_fail ("LHS->getType() == RHS->getType() && \"Pointer subtraction operand types must match!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1801, __extension__ __PRETTY_FUNCTION__))
1801 "Pointer subtraction operand types must match!")(static_cast <bool> (LHS->getType() == RHS->getType
() && "Pointer subtraction operand types must match!"
) ? void (0) : __assert_fail ("LHS->getType() == RHS->getType() && \"Pointer subtraction operand types must match!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1801, __extension__ __PRETTY_FUNCTION__))
;
1802 PointerType *ArgType = cast<PointerType>(LHS->getType());
1803 Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
1804 Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
1805 Value *Difference = CreateSub(LHS_int, RHS_int);
1806 return CreateExactSDiv(Difference,
1807 ConstantExpr::getSizeOf(ArgType->getElementType()),
1808 Name);
1809 }
1810
1811 /// \brief Create an invariant.group.barrier intrinsic call, that stops
1812 /// optimizer to propagate equality using invariant.group metadata.
1813 /// If Ptr type is different from pointer to i8, it's casted to pointer to i8
1814 /// in the same address space before call and casted back to Ptr type after
1815 /// call.
1816 Value *CreateInvariantGroupBarrier(Value *Ptr) {
1817 assert(isa<PointerType>(Ptr->getType()) &&(static_cast <bool> (isa<PointerType>(Ptr->getType
()) && "invariant.group.barrier only applies to pointers."
) ? void (0) : __assert_fail ("isa<PointerType>(Ptr->getType()) && \"invariant.group.barrier only applies to pointers.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1818, __extension__ __PRETTY_FUNCTION__))
1818 "invariant.group.barrier only applies to pointers.")(static_cast <bool> (isa<PointerType>(Ptr->getType
()) && "invariant.group.barrier only applies to pointers."
) ? void (0) : __assert_fail ("isa<PointerType>(Ptr->getType()) && \"invariant.group.barrier only applies to pointers.\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1818, __extension__ __PRETTY_FUNCTION__))
;
1819 auto *PtrType = Ptr->getType();
1820 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
1821 if (PtrType != Int8PtrTy)
1822 Ptr = CreateBitCast(Ptr, Int8PtrTy);
1823 Module *M = BB->getParent()->getParent();
1824 Function *FnInvariantGroupBarrier = Intrinsic::getDeclaration(
1825 M, Intrinsic::invariant_group_barrier, {Int8PtrTy});
1826
1827 assert(FnInvariantGroupBarrier->getReturnType() == Int8PtrTy &&(static_cast <bool> (FnInvariantGroupBarrier->getReturnType
() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType
()->getParamType(0) == Int8PtrTy && "InvariantGroupBarrier should take and return the same type"
) ? void (0) : __assert_fail ("FnInvariantGroupBarrier->getReturnType() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType()->getParamType(0) == Int8PtrTy && \"InvariantGroupBarrier should take and return the same type\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1830, __extension__ __PRETTY_FUNCTION__))
1828 FnInvariantGroupBarrier->getFunctionType()->getParamType(0) ==(static_cast <bool> (FnInvariantGroupBarrier->getReturnType
() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType
()->getParamType(0) == Int8PtrTy && "InvariantGroupBarrier should take and return the same type"
) ? void (0) : __assert_fail ("FnInvariantGroupBarrier->getReturnType() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType()->getParamType(0) == Int8PtrTy && \"InvariantGroupBarrier should take and return the same type\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1830, __extension__ __PRETTY_FUNCTION__))
1829 Int8PtrTy &&(static_cast <bool> (FnInvariantGroupBarrier->getReturnType
() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType
()->getParamType(0) == Int8PtrTy && "InvariantGroupBarrier should take and return the same type"
) ? void (0) : __assert_fail ("FnInvariantGroupBarrier->getReturnType() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType()->getParamType(0) == Int8PtrTy && \"InvariantGroupBarrier should take and return the same type\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1830, __extension__ __PRETTY_FUNCTION__))
1830 "InvariantGroupBarrier should take and return the same type")(static_cast <bool> (FnInvariantGroupBarrier->getReturnType
() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType
()->getParamType(0) == Int8PtrTy && "InvariantGroupBarrier should take and return the same type"
) ? void (0) : __assert_fail ("FnInvariantGroupBarrier->getReturnType() == Int8PtrTy && FnInvariantGroupBarrier->getFunctionType()->getParamType(0) == Int8PtrTy && \"InvariantGroupBarrier should take and return the same type\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1830, __extension__ __PRETTY_FUNCTION__))
;
1831
1832 CallInst *Fn = CreateCall(FnInvariantGroupBarrier, {Ptr});
1833
1834 if (PtrType != Int8PtrTy)
1835 return CreateBitCast(Fn, PtrType);
1836 return Fn;
1837 }
1838
1839 /// \brief Return a vector value that contains \arg V broadcasted to \p
1840 /// NumElts elements.
1841 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") {
1842 assert(NumElts > 0 && "Cannot splat to an empty vector!")(static_cast <bool> (NumElts > 0 && "Cannot splat to an empty vector!"
) ? void (0) : __assert_fail ("NumElts > 0 && \"Cannot splat to an empty vector!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1842, __extension__ __PRETTY_FUNCTION__))
;
1843
1844 // First insert it into an undef vector so we can shuffle it.
1845 Type *I32Ty = getInt32Ty();
1846 Value *Undef = UndefValue::get(VectorType::get(V->getType(), NumElts));
1847 V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
1848 Name + ".splatinsert");
1849
1850 // Shuffle the value across the desired number of elements.
1851 Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32Ty, NumElts));
1852 return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
1853 }
1854
1855 /// \brief Return a value that has been extracted from a larger integer type.
1856 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
1857 IntegerType *ExtractedTy, uint64_t Offset,
1858 const Twine &Name) {
1859 IntegerType *IntTy = cast<IntegerType>(From->getType());
1860 assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=(static_cast <bool> (DL.getTypeStoreSize(ExtractedTy) +
Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"
) ? void (0) : __assert_fail ("DL.getTypeStoreSize(ExtractedTy) + Offset <= DL.getTypeStoreSize(IntTy) && \"Element extends past full value\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1862, __extension__ __PRETTY_FUNCTION__))
1861 DL.getTypeStoreSize(IntTy) &&(static_cast <bool> (DL.getTypeStoreSize(ExtractedTy) +
Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"
) ? void (0) : __assert_fail ("DL.getTypeStoreSize(ExtractedTy) + Offset <= DL.getTypeStoreSize(IntTy) && \"Element extends past full value\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1862, __extension__ __PRETTY_FUNCTION__))
1862 "Element extends past full value")(static_cast <bool> (DL.getTypeStoreSize(ExtractedTy) +
Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"
) ? void (0) : __assert_fail ("DL.getTypeStoreSize(ExtractedTy) + Offset <= DL.getTypeStoreSize(IntTy) && \"Element extends past full value\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1862, __extension__ __PRETTY_FUNCTION__))
;
1863 uint64_t ShAmt = 8 * Offset;
1864 Value *V = From;
1865 if (DL.isBigEndian())
1866 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
1867 DL.getTypeStoreSize(ExtractedTy) - Offset);
1868 if (ShAmt) {
1869 V = CreateLShr(V, ShAmt, Name + ".shift");
1870 }
1871 assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&(static_cast <bool> (ExtractedTy->getBitWidth() <=
IntTy->getBitWidth() && "Cannot extract to a larger integer!"
) ? void (0) : __assert_fail ("ExtractedTy->getBitWidth() <= IntTy->getBitWidth() && \"Cannot extract to a larger integer!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1872, __extension__ __PRETTY_FUNCTION__))
1872 "Cannot extract to a larger integer!")(static_cast <bool> (ExtractedTy->getBitWidth() <=
IntTy->getBitWidth() && "Cannot extract to a larger integer!"
) ? void (0) : __assert_fail ("ExtractedTy->getBitWidth() <= IntTy->getBitWidth() && \"Cannot extract to a larger integer!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1872, __extension__ __PRETTY_FUNCTION__))
;
1873 if (ExtractedTy != IntTy) {
1874 V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
1875 }
1876 return V;
1877 }
1878
1879private:
1880 /// \brief Helper function that creates an assume intrinsic call that
1881 /// represents an alignment assumption on the provided Ptr, Mask, Type
1882 /// and Offset.
1883 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
1884 Value *PtrValue, Value *Mask,
1885 Type *IntPtrTy,
1886 Value *OffsetValue) {
1887 Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
1888
1889 if (OffsetValue) {
1890 bool IsOffsetZero = false;
1891 if (ConstantInt *CI = dyn_cast<ConstantInt>(OffsetValue))
1892 IsOffsetZero = CI->isZero();
1893
1894 if (!IsOffsetZero) {
1895 if (OffsetValue->getType() != IntPtrTy)
1896 OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
1897 "offsetcast");
1898 PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
1899 }
1900 }
1901
1902 Value *Zero = ConstantInt::get(IntPtrTy, 0);
1903 Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
1904 Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
1905 return CreateAssumption(InvCond);
1906 }
1907
1908public:
1909 /// \brief Create an assume intrinsic call that represents an alignment
1910 /// assumption on the provided pointer.
1911 ///
1912 /// An optional offset can be provided, and if it is provided, the offset
1913 /// must be subtracted from the provided pointer to get the pointer with the
1914 /// specified alignment.
1915 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
1916 unsigned Alignment,
1917 Value *OffsetValue = nullptr) {
1918 assert(isa<PointerType>(PtrValue->getType()) &&(static_cast <bool> (isa<PointerType>(PtrValue->
getType()) && "trying to create an alignment assumption on a non-pointer?"
) ? void (0) : __assert_fail ("isa<PointerType>(PtrValue->getType()) && \"trying to create an alignment assumption on a non-pointer?\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1919, __extension__ __PRETTY_FUNCTION__))
1919 "trying to create an alignment assumption on a non-pointer?")(static_cast <bool> (isa<PointerType>(PtrValue->
getType()) && "trying to create an alignment assumption on a non-pointer?"
) ? void (0) : __assert_fail ("isa<PointerType>(PtrValue->getType()) && \"trying to create an alignment assumption on a non-pointer?\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1919, __extension__ __PRETTY_FUNCTION__))
;
1920 PointerType *PtrTy = cast<PointerType>(PtrValue->getType());
1921 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1922
1923 Value *Mask = ConstantInt::get(IntPtrTy, Alignment > 0 ? Alignment - 1 : 0);
1924 return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
1925 OffsetValue);
1926 }
1927 //
1928 /// \brief Create an assume intrinsic call that represents an alignment
1929 /// assumption on the provided pointer.
1930 ///
1931 /// An optional offset can be provided, and if it is provided, the offset
1932 /// must be subtracted from the provided pointer to get the pointer with the
1933 /// specified alignment.
1934 ///
1935 /// This overload handles the condition where the Alignment is dependent
1936 /// on an existing value rather than a static value.
1937 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
1938 Value *Alignment,
1939 Value *OffsetValue = nullptr) {
1940 assert(isa<PointerType>(PtrValue->getType()) &&(static_cast <bool> (isa<PointerType>(PtrValue->
getType()) && "trying to create an alignment assumption on a non-pointer?"
) ? void (0) : __assert_fail ("isa<PointerType>(PtrValue->getType()) && \"trying to create an alignment assumption on a non-pointer?\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1941, __extension__ __PRETTY_FUNCTION__))
1941 "trying to create an alignment assumption on a non-pointer?")(static_cast <bool> (isa<PointerType>(PtrValue->
getType()) && "trying to create an alignment assumption on a non-pointer?"
) ? void (0) : __assert_fail ("isa<PointerType>(PtrValue->getType()) && \"trying to create an alignment assumption on a non-pointer?\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/IRBuilder.h"
, 1941, __extension__ __PRETTY_FUNCTION__))
;
1942 PointerType *PtrTy = cast<PointerType>(PtrValue->getType());
1943 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1944
1945 if (Alignment->getType() != IntPtrTy)
1946 Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ true,
1947 "alignmentcast");
1948 Value *IsPositive =
1949 CreateICmp(CmpInst::ICMP_SGT, Alignment,
1950 ConstantInt::get(Alignment->getType(), 0), "ispositive");
1951 Value *PositiveMask =
1952 CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "positivemask");
1953 Value *Mask = CreateSelect(IsPositive, PositiveMask,
1954 ConstantInt::get(IntPtrTy, 0), "mask");
1955
1956 return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
1957 OffsetValue);
1958 }
1959};
1960
1961// Create wrappers for C Binding types (see CBindingWrapping.h).
1962DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast
<IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const
IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef
>(const_cast<IRBuilder<>*>(P)); }
1963
1964} // end namespace llvm
1965
1966#endif // LLVM_IR_IRBUILDER_H

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h

1//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
11// and dyn_cast_or_null<X>() templates.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_SUPPORT_CASTING_H
16#define LLVM_SUPPORT_CASTING_H
17
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/type_traits.h"
20#include <cassert>
21#include <memory>
22#include <type_traits>
23
24namespace llvm {
25
26//===----------------------------------------------------------------------===//
27// isa<x> Support Templates
28//===----------------------------------------------------------------------===//
29
30// Define a template that can be specialized by smart pointers to reflect the
31// fact that they are automatically dereferenced, and are not involved with the
32// template selection process... the default implementation is a noop.
33//
34template<typename From> struct simplify_type {
35 using SimpleType = From; // The real type this represents...
36
37 // An accessor to get the real value...
38 static SimpleType &getSimplifiedValue(From &Val) { return Val; }
39};
40
41template<typename From> struct simplify_type<const From> {
42 using NonConstSimpleType = typename simplify_type<From>::SimpleType;
43 using SimpleType =
44 typename add_const_past_pointer<NonConstSimpleType>::type;
45 using RetType =
46 typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
47
48 static RetType getSimplifiedValue(const From& Val) {
49 return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
18
Calling 'simplify_type::getSimplifiedValue'
19
Returning from 'simplify_type::getSimplifiedValue'
65
Calling 'simplify_type::getSimplifiedValue'
66
Returning from 'simplify_type::getSimplifiedValue'
111
Calling 'simplify_type::getSimplifiedValue'
112
Returning from 'simplify_type::getSimplifiedValue'
50 }
51};
52
53// The core of the implementation of isa<X> is here; To and From should be
54// the names of classes. This template can be specialized to customize the
55// implementation of isa<> without rewriting it from scratch.
56template <typename To, typename From, typename Enabler = void>
57struct isa_impl {
58 static inline bool doit(const From &Val) {
59 return To::classof(&Val);
60 }
61};
62
63/// \brief Always allow upcasts, and perform no dynamic check for them.
64template <typename To, typename From>
65struct isa_impl<
66 To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
67 static inline bool doit(const From &) { return true; }
68};
69
70template <typename To, typename From> struct isa_impl_cl {
71 static inline bool doit(const From &Val) {
72 return isa_impl<To, From>::doit(Val);
73 }
74};
75
76template <typename To, typename From> struct isa_impl_cl<To, const From> {
77 static inline bool doit(const From &Val) {
78 return isa_impl<To, From>::doit(Val);
79 }
80};
81
82template <typename To, typename From>
83struct isa_impl_cl<To, const std::unique_ptr<From>> {
84 static inline bool doit(const std::unique_ptr<From> &Val) {
85 assert(Val && "isa<> used on a null pointer")(static_cast <bool> (Val && "isa<> used on a null pointer"
) ? void (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 85, __extension__ __PRETTY_FUNCTION__))
;
86 return isa_impl_cl<To, From>::doit(*Val);
87 }
88};
89
90template <typename To, typename From> struct isa_impl_cl<To, From*> {
91 static inline bool doit(const From *Val) {
92 assert(Val && "isa<> used on a null pointer")(static_cast <bool> (Val && "isa<> used on a null pointer"
) ? void (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 92, __extension__ __PRETTY_FUNCTION__))
;
93 return isa_impl<To, From>::doit(*Val);
94 }
95};
96
97template <typename To, typename From> struct isa_impl_cl<To, From*const> {
98 static inline bool doit(const From *Val) {
99 assert(Val && "isa<> used on a null pointer")(static_cast <bool> (Val && "isa<> used on a null pointer"
) ? void (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 99, __extension__ __PRETTY_FUNCTION__))
;
100 return isa_impl<To, From>::doit(*Val);
101 }
102};
103
104template <typename To, typename From> struct isa_impl_cl<To, const From*> {
105 static inline bool doit(const From *Val) {
106 assert(Val && "isa<> used on a null pointer")(static_cast <bool> (Val && "isa<> used on a null pointer"
) ? void (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 106, __extension__ __PRETTY_FUNCTION__))
;
23
Within the expansion of the macro 'assert':
a
Assuming 'Val' is non-null
70
Within the expansion of the macro 'assert':
a
Assuming 'Val' is non-null
116
Within the expansion of the macro 'assert':
a
Assuming 'Val' is non-null
107 return isa_impl<To, From>::doit(*Val);
24
Calling 'isa_impl::doit'
28
Returning from 'isa_impl::doit'
71
Calling 'isa_impl::doit'
75
Returning from 'isa_impl::doit'
117
Calling 'isa_impl::doit'
121
Returning from 'isa_impl::doit'
108 }
109};
110
111template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
112 static inline bool doit(const From *Val) {
113 assert(Val && "isa<> used on a null pointer")(static_cast <bool> (Val && "isa<> used on a null pointer"
) ? void (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 113, __extension__ __PRETTY_FUNCTION__))
;
114 return isa_impl<To, From>::doit(*Val);
115 }
116};
117
118template<typename To, typename From, typename SimpleFrom>
119struct isa_impl_wrap {
120 // When From != SimplifiedType, we can simplify the type some more by using
121 // the simplify_type template.
122 static bool doit(const From &Val) {
123 return isa_impl_wrap<To, SimpleFrom,
21
Calling 'isa_impl_wrap::doit'
30
Returning from 'isa_impl_wrap::doit'
68
Calling 'isa_impl_wrap::doit'
77
Returning from 'isa_impl_wrap::doit'
114
Calling 'isa_impl_wrap::doit'
123
Returning from 'isa_impl_wrap::doit'
124 typename simplify_type<SimpleFrom>::SimpleType>::doit(
125 simplify_type<const From>::getSimplifiedValue(Val));
17
Calling 'simplify_type::getSimplifiedValue'
20
Returning from 'simplify_type::getSimplifiedValue'
64
Calling 'simplify_type::getSimplifiedValue'
67
Returning from 'simplify_type::getSimplifiedValue'
110
Calling 'simplify_type::getSimplifiedValue'
113
Returning from 'simplify_type::getSimplifiedValue'
126 }
127};
128
129template<typename To, typename FromTy>
130struct isa_impl_wrap<To, FromTy, FromTy> {
131 // When From == SimpleType, we are as simple as we are going to get.
132 static bool doit(const FromTy &Val) {
133 return isa_impl_cl<To,FromTy>::doit(Val);
22
Calling 'isa_impl_cl::doit'
29
Returning from 'isa_impl_cl::doit'
69
Calling 'isa_impl_cl::doit'
76
Returning from 'isa_impl_cl::doit'
115
Calling 'isa_impl_cl::doit'
122
Returning from 'isa_impl_cl::doit'
134 }
135};
136
137// isa<X> - Return true if the parameter to the template is an instance of the
138// template type argument. Used like this:
139//
140// if (isa<Type>(myVal)) { ... }
141//
142template <class X, class Y> LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isa(const Y &Val) {
143 return isa_impl_wrap<X, const Y,
16
Calling 'isa_impl_wrap::doit'
31
Returning from 'isa_impl_wrap::doit'
63
Calling 'isa_impl_wrap::doit'
78
Returning from 'isa_impl_wrap::doit'
109
Calling 'isa_impl_wrap::doit'
124
Returning from 'isa_impl_wrap::doit'
144 typename simplify_type<const Y>::SimpleType>::doit(Val);
145}
146
147//===----------------------------------------------------------------------===//
148// cast<x> Support Templates
149//===----------------------------------------------------------------------===//
150
151template<class To, class From> struct cast_retty;
152
153// Calculate what type the 'cast' function should return, based on a requested
154// type of To and a source type of From.
155template<class To, class From> struct cast_retty_impl {
156 using ret_type = To &; // Normal case, return Ty&
157};
158template<class To, class From> struct cast_retty_impl<To, const From> {
159 using ret_type = const To &; // Normal case, return Ty&
160};
161
162template<class To, class From> struct cast_retty_impl<To, From*> {
163 using ret_type = To *; // Pointer arg case, return Ty*
164};
165
166template<class To, class From> struct cast_retty_impl<To, const From*> {
167 using ret_type = const To *; // Constant pointer arg case, return const Ty*
168};
169
170template<class To, class From> struct cast_retty_impl<To, const From*const> {
171 using ret_type = const To *; // Constant pointer arg case, return const Ty*
172};
173
174template <class To, class From>
175struct cast_retty_impl<To, std::unique_ptr<From>> {
176private:
177 using PointerType = typename cast_retty_impl<To, From *>::ret_type;
178 using ResultType = typename std::remove_pointer<PointerType>::type;
179
180public:
181 using ret_type = std::unique_ptr<ResultType>;
182};
183
184template<class To, class From, class SimpleFrom>
185struct cast_retty_wrap {
186 // When the simplified type and the from type are not the same, use the type
187 // simplifier to reduce the type, then reuse cast_retty_impl to get the
188 // resultant type.
189 using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
190};
191
192template<class To, class FromTy>
193struct cast_retty_wrap<To, FromTy, FromTy> {
194 // When the simplified type is equal to the from type, use it directly.
195 using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
196};
197
198template<class To, class From>
199struct cast_retty {
200 using ret_type = typename cast_retty_wrap<
201 To, From, typename simplify_type<From>::SimpleType>::ret_type;
202};
203
204// Ensure the non-simple values are converted using the simplify_type template
205// that may be specialized by smart pointers...
206//
207template<class To, class From, class SimpleFrom> struct cast_convert_val {
208 // This is not a simple type, use the template to simplify it...
209 static typename cast_retty<To, From>::ret_type doit(From &Val) {
210 return cast_convert_val<To, SimpleFrom,
211 typename simplify_type<SimpleFrom>::SimpleType>::doit(
212 simplify_type<From>::getSimplifiedValue(Val));
213 }
214};
215
216template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
217 // This _is_ a simple type, just cast it.
218 static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
219 typename cast_retty<To, FromTy>::ret_type Res2
220 = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
221 return Res2;
222 }
223};
224
225template <class X> struct is_simple_type {
226 static const bool value =
227 std::is_same<X, typename simplify_type<X>::SimpleType>::value;
228};
229
230// cast<X> - Return the argument parameter cast to the specified type. This
231// casting operator asserts that the type is correct, so it does not return null
232// on failure. It does not allow a null argument (use cast_or_null for that).
233// It is typically used like this:
234//
235// cast<Instruction>(myVal)->getParent()
236//
237template <class X, class Y>
238inline typename std::enable_if<!is_simple_type<Y>::value,
239 typename cast_retty<X, const Y>::ret_type>::type
240cast(const Y &Val) {
241 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val) && "cast<Ty>() argument of incompatible type!"
) ? void (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 241, __extension__ __PRETTY_FUNCTION__))
;
242 return cast_convert_val<
243 X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
244}
245
246template <class X, class Y>
247inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
248 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val) && "cast<Ty>() argument of incompatible type!"
) ? void (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 248, __extension__ __PRETTY_FUNCTION__))
;
249 return cast_convert_val<X, Y,
250 typename simplify_type<Y>::SimpleType>::doit(Val);
251}
252
253template <class X, class Y>
254inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
255 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val) && "cast<Ty>() argument of incompatible type!"
) ? void (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 255, __extension__ __PRETTY_FUNCTION__))
;
256 return cast_convert_val<X, Y*,
257 typename simplify_type<Y*>::SimpleType>::doit(Val);
258}
259
260template <class X, class Y>
261inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
262cast(std::unique_ptr<Y> &&Val) {
263 assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val.get()) &&
"cast<Ty>() argument of incompatible type!") ? void (0
) : __assert_fail ("isa<X>(Val.get()) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 263, __extension__ __PRETTY_FUNCTION__))
;
264 using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
265 return ret_type(
266 cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit(
267 Val.release()));
268}
269
270// cast_or_null<X> - Functionally identical to cast, except that a null value is
271// accepted.
272//
273template <class X, class Y>
274LLVM_NODISCARD[[clang::warn_unused_result]] inline
275 typename std::enable_if<!is_simple_type<Y>::value,
276 typename cast_retty<X, const Y>::ret_type>::type
277 cast_or_null(const Y &Val) {
278 if (!Val)
279 return nullptr;
280 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"
) ? void (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 280, __extension__ __PRETTY_FUNCTION__))
;
281 return cast<X>(Val);
282}
283
284template <class X, class Y>
285LLVM_NODISCARD[[clang::warn_unused_result]] inline
286 typename std::enable_if<!is_simple_type<Y>::value,
287 typename cast_retty<X, Y>::ret_type>::type
288 cast_or_null(Y &Val) {
289 if (!Val)
290 return nullptr;
291 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"
) ? void (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 291, __extension__ __PRETTY_FUNCTION__))
;
292 return cast<X>(Val);
293}
294
295template <class X, class Y>
296LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type
297cast_or_null(Y *Val) {
298 if (!Val) return nullptr;
299 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")(static_cast <bool> (isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"
) ? void (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/Support/Casting.h"
, 299, __extension__ __PRETTY_FUNCTION__))
;
300 return cast<X>(Val);
301}
302
303template <class X, class Y>
304inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
305cast_or_null(std::unique_ptr<Y> &&Val) {
306 if (!Val)
307 return nullptr;
308 return cast<X>(std::move(Val));
309}
310
311// dyn_cast<X> - Return the argument parameter cast to the specified type. This
312// casting operator returns null if the argument is of the wrong type, so it can
313// be used to test for a type as well as cast if successful. This should be
314// used in the context of an if statement like this:
315//
316// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
317//
318
319template <class X, class Y>
320LLVM_NODISCARD[[clang::warn_unused_result]] inline
321 typename std::enable_if<!is_simple_type<Y>::value,
322 typename cast_retty<X, const Y>::ret_type>::type
323 dyn_cast(const Y &Val) {
324 return isa<X>(Val) ? cast<X>(Val) : nullptr;
325}
326
327template <class X, class Y>
328LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
329 return isa<X>(Val) ? cast<X>(Val) : nullptr;
330}
331
332template <class X, class Y>
333LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
334 return isa<X>(Val) ? cast<X>(Val) : nullptr;
15
Calling 'isa'
32
Returning from 'isa'
33
'?' condition is false
62
Calling 'isa'
79
Returning from 'isa'
80
'?' condition is false
108
Calling 'isa'
125
Returning from 'isa'
126
'?' condition is false
335}
336
337// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
338// value is accepted.
339//
340template <class X, class Y>
341LLVM_NODISCARD[[clang::warn_unused_result]] inline
342 typename std::enable_if<!is_simple_type<Y>::value,
343 typename cast_retty<X, const Y>::ret_type>::type
344 dyn_cast_or_null(const Y &Val) {
345 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
346}
347
348template <class X, class Y>
349LLVM_NODISCARD[[clang::warn_unused_result]] inline
350 typename std::enable_if<!is_simple_type<Y>::value,
351 typename cast_retty<X, Y>::ret_type>::type
352 dyn_cast_or_null(Y &Val) {
353 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
354}
355
356template <class X, class Y>
357LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type
358dyn_cast_or_null(Y *Val) {
359 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
360}
361
362// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
363// taking ownership of the input pointer iff isa<X>(Val) is true. If the
364// cast is successful, From refers to nullptr on exit and the casted value
365// is returned. If the cast is unsuccessful, the function returns nullptr
366// and From is unchanged.
367template <class X, class Y>
368LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
369 -> decltype(cast<X>(Val)) {
370 if (!isa<X>(Val))
371 return nullptr;
372 return cast<X>(std::move(Val));
373}
374
375template <class X, class Y>
376LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val)
377 -> decltype(cast<X>(Val)) {
378 return unique_dyn_cast<X, Y>(Val);
379}
380
381// dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast, except that
382// a null value is accepted.
383template <class X, class Y>
384LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
385 -> decltype(cast<X>(Val)) {
386 if (!Val)
387 return nullptr;
388 return unique_dyn_cast<X, Y>(Val);
389}
390
391template <class X, class Y>
392LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val)
393 -> decltype(cast<X>(Val)) {
394 return unique_dyn_cast_or_null<X, Y>(Val);
395}
396
397} // end namespace llvm
398
399#endif // LLVM_SUPPORT_CASTING_H

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/Value.h

1//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file declares the Value class.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_VALUE_H
15#define LLVM_IR_VALUE_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/iterator_range.h"
19#include "llvm/IR/Use.h"
20#include "llvm/Support/CBindingWrapping.h"
21#include "llvm/Support/Casting.h"
22#include <cassert>
23#include <iterator>
24#include <memory>
25
26namespace llvm {
27
28class APInt;
29class Argument;
30class BasicBlock;
31class Constant;
32class ConstantData;
33class ConstantAggregate;
34class DataLayout;
35class Function;
36class GlobalAlias;
37class GlobalIFunc;
38class GlobalIndirectSymbol;
39class GlobalObject;
40class GlobalValue;
41class GlobalVariable;
42class InlineAsm;
43class Instruction;
44class LLVMContext;
45class Module;
46class ModuleSlotTracker;
47class raw_ostream;
48template<typename ValueTy> class StringMapEntry;
49class StringRef;
50class Twine;
51class Type;
52class User;
53
54using ValueName = StringMapEntry<Value *>;
55
56//===----------------------------------------------------------------------===//
57// Value Class
58//===----------------------------------------------------------------------===//
59
60/// \brief LLVM Value Representation
61///
62/// This is a very important LLVM class. It is the base class of all values
63/// computed by a program that may be used as operands to other values. Value is
64/// the super class of other important classes such as Instruction and Function.
65/// All Values have a Type. Type is not a subclass of Value. Some values can
66/// have a name and they belong to some Module. Setting the name on the Value
67/// automatically updates the module's symbol table.
68///
69/// Every value has a "use list" that keeps track of which other Values are
70/// using this Value. A Value can also have an arbitrary number of ValueHandle
71/// objects that watch it and listen to RAUW and Destroy events. See
72/// llvm/IR/ValueHandle.h for details.
73class Value {
74 // The least-significant bit of the first word of Value *must* be zero:
75 // http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
76 Type *VTy;
77 Use *UseList;
78
79 friend class ValueAsMetadata; // Allow access to IsUsedByMD.
80 friend class ValueHandleBase;
81
82 const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
83 unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
84
85protected:
86 /// \brief Hold subclass data that can be dropped.
87 ///
88 /// This member is similar to SubclassData, however it is for holding
89 /// information which may be used to aid optimization, but which may be
90 /// cleared to zero without affecting conservative interpretation.
91 unsigned char SubclassOptionalData : 7;
92
93private:
94 /// \brief Hold arbitrary subclass data.
95 ///
96 /// This member is defined by this class, but is not used for anything.
97 /// Subclasses can use it to hold whatever state they find useful. This
98 /// field is initialized to zero by the ctor.
99 unsigned short SubclassData;
100
101protected:
102 /// \brief The number of operands in the subclass.
103 ///
104 /// This member is defined by this class, but not used for anything.
105 /// Subclasses can use it to store their number of operands, if they have
106 /// any.
107 ///
108 /// This is stored here to save space in User on 64-bit hosts. Since most
109 /// instances of Value have operands, 32-bit hosts aren't significantly
110 /// affected.
111 ///
112 /// Note, this should *NOT* be used directly by any class other than User.
113 /// User uses this value to find the Use list.
114 enum : unsigned { NumUserOperandsBits = 28 };
115 unsigned NumUserOperands : NumUserOperandsBits;
116
117 // Use the same type as the bitfield above so that MSVC will pack them.
118 unsigned IsUsedByMD : 1;
119 unsigned HasName : 1;
120 unsigned HasHungOffUses : 1;
121 unsigned HasDescriptor : 1;
122
123private:
124 template <typename UseT> // UseT == 'Use' or 'const Use'
125 class use_iterator_impl
126 : public std::iterator<std::forward_iterator_tag, UseT *> {
127 friend class Value;
128
129 UseT *U;
130
131 explicit use_iterator_impl(UseT *u) : U(u) {}
132
133 public:
134 use_iterator_impl() : U() {}
135
136 bool operator==(const use_iterator_impl &x) const { return U == x.U; }
137 bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
138
139 use_iterator_impl &operator++() { // Preincrement
140 assert(U && "Cannot increment end iterator!")(static_cast <bool> (U && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("U && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/Value.h"
, 140, __extension__ __PRETTY_FUNCTION__))
;
141 U = U->getNext();
142 return *this;
143 }
144
145 use_iterator_impl operator++(int) { // Postincrement
146 auto tmp = *this;
147 ++*this;
148 return tmp;
149 }
150
151 UseT &operator*() const {
152 assert(U && "Cannot dereference end iterator!")(static_cast <bool> (U && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("U && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/Value.h"
, 152, __extension__ __PRETTY_FUNCTION__))
;
153 return *U;
154 }
155
156 UseT *operator->() const { return &operator*(); }
157
158 operator use_iterator_impl<const UseT>() const {
159 return use_iterator_impl<const UseT>(U);
160 }
161 };
162
163 template <typename UserTy> // UserTy == 'User' or 'const User'
164 class user_iterator_impl
165 : public std::iterator<std::forward_iterator_tag, UserTy *> {
166 use_iterator_impl<Use> UI;
167 explicit user_iterator_impl(Use *U) : UI(U) {}
168 friend class Value;
169
170 public:
171 user_iterator_impl() = default;
172
173 bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
174 bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
175
176 /// \brief Returns true if this iterator is equal to user_end() on the value.
177 bool atEnd() const { return *this == user_iterator_impl(); }
178
179 user_iterator_impl &operator++() { // Preincrement
180 ++UI;
181 return *this;
182 }
183
184 user_iterator_impl operator++(int) { // Postincrement
185 auto tmp = *this;
186 ++*this;
187 return tmp;
188 }
189
190 // Retrieve a pointer to the current User.
191 UserTy *operator*() const {
192 return UI->getUser();
193 }
194
195 UserTy *operator->() const { return operator*(); }
196
197 operator user_iterator_impl<const UserTy>() const {
198 return user_iterator_impl<const UserTy>(*UI);
199 }
200
201 Use &getUse() const { return *UI; }
202 };
203
204protected:
205 Value(Type *Ty, unsigned scid);
206
207 /// Value's destructor should be virtual by design, but that would require
208 /// that Value and all of its subclasses have a vtable that effectively
209 /// duplicates the information in the value ID. As a size optimization, the
210 /// destructor has been protected, and the caller should manually call
211 /// deleteValue.
212 ~Value(); // Use deleteValue() to delete a generic Value.
213
214public:
215 Value(const Value &) = delete;
216 Value &operator=(const Value &) = delete;
217
218 /// Delete a pointer to a generic Value.
219 void deleteValue();
220
221 /// \brief Support for debugging, callable in GDB: V->dump()
222 void dump() const;
223
224 /// \brief Implement operator<< on Value.
225 /// @{
226 void print(raw_ostream &O, bool IsForDebug = false) const;
227 void print(raw_ostream &O, ModuleSlotTracker &MST,
228 bool IsForDebug = false) const;
229 /// @}
230
231 /// \brief Print the name of this Value out to the specified raw_ostream.
232 ///
233 /// This is useful when you just want to print 'int %reg126', not the
234 /// instruction that generated it. If you specify a Module for context, then
235 /// even constanst get pretty-printed; for example, the type of a null
236 /// pointer is printed symbolically.
237 /// @{
238 void printAsOperand(raw_ostream &O, bool PrintType = true,
239 const Module *M = nullptr) const;
240 void printAsOperand(raw_ostream &O, bool PrintType,
241 ModuleSlotTracker &MST) const;
242 /// @}
243
244 /// \brief All values are typed, get the type of this value.
245 Type *getType() const { return VTy; }
246
247 /// \brief All values hold a context through their type.
248 LLVMContext &getContext() const;
249
250 // \brief All values can potentially be named.
251 bool hasName() const { return HasName; }
252 ValueName *getValueName() const;
253 void setValueName(ValueName *VN);
254
255private:
256 void destroyValueName();
257 void doRAUW(Value *New, bool NoMetadata);
258 void setNameImpl(const Twine &Name);
259
260public:
261 /// \brief Return a constant reference to the value's name.
262 ///
263 /// This guaranteed to return the same reference as long as the value is not
264 /// modified. If the value has a name, this does a hashtable lookup, so it's
265 /// not free.
266 StringRef getName() const;
267
268 /// \brief Change the name of the value.
269 ///
270 /// Choose a new unique name if the provided name is taken.
271 ///
272 /// \param Name The new name; or "" if the value's name should be removed.
273 void setName(const Twine &Name);
274
275 /// \brief Transfer the name from V to this value.
276 ///
277 /// After taking V's name, sets V's name to empty.
278 ///
279 /// \note It is an error to call V->takeName(V).
280 void takeName(Value *V);
281
282 /// \brief Change all uses of this to point to a new Value.
283 ///
284 /// Go through the uses list for this definition and make each use point to
285 /// "V" instead of "this". After this completes, 'this's use list is
286 /// guaranteed to be empty.
287 void replaceAllUsesWith(Value *V);
288
289 /// \brief Change non-metadata uses of this to point to a new Value.
290 ///
291 /// Go through the uses list for this definition and make each use point to
292 /// "V" instead of "this". This function skips metadata entries in the list.
293 void replaceNonMetadataUsesWith(Value *V);
294
295 /// replaceUsesOutsideBlock - Go through the uses list for this definition and
296 /// make each use point to "V" instead of "this" when the use is outside the
297 /// block. 'This's use list is expected to have at least one element.
298 /// Unlike replaceAllUsesWith this function does not support basic block
299 /// values or constant users.
300 void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
301
302 /// replaceUsesExceptBlockAddr - Go through the uses list for this definition
303 /// and make each use point to "V" instead of "this" when the use is outside
304 /// the block. 'This's use list is expected to have at least one element.
305 /// Unlike replaceAllUsesWith this function skips blockaddr uses.
306 void replaceUsesExceptBlockAddr(Value *New);
307
308 //----------------------------------------------------------------------
309 // Methods for handling the chain of uses of this Value.
310 //
311 // Materializing a function can introduce new uses, so these methods come in
312 // two variants:
313 // The methods that start with materialized_ check the uses that are
314 // currently known given which functions are materialized. Be very careful
315 // when using them since you might not get all uses.
316 // The methods that don't start with materialized_ assert that modules is
317 // fully materialized.
318 void assertModuleIsMaterializedImpl() const;
319 // This indirection exists so we can keep assertModuleIsMaterializedImpl()
320 // around in release builds of Value.cpp to be linked with other code built
321 // in debug mode. But this avoids calling it in any of the release built code.
322 void assertModuleIsMaterialized() const {
323#ifndef NDEBUG
324 assertModuleIsMaterializedImpl();
325#endif
326 }
327
328 bool use_empty() const {
329 assertModuleIsMaterialized();
330 return UseList == nullptr;
331 }
332
333 bool materialized_use_empty() const {
334 return UseList == nullptr;
335 }
336
337 using use_iterator = use_iterator_impl<Use>;
338 using const_use_iterator = use_iterator_impl<const Use>;
339
340 use_iterator materialized_use_begin() { return use_iterator(UseList); }
341 const_use_iterator materialized_use_begin() const {
342 return const_use_iterator(UseList);
343 }
344 use_iterator use_begin() {
345 assertModuleIsMaterialized();
346 return materialized_use_begin();
347 }
348 const_use_iterator use_begin() const {
349 assertModuleIsMaterialized();
350 return materialized_use_begin();
351 }
352 use_iterator use_end() { return use_iterator(); }
353 const_use_iterator use_end() const { return const_use_iterator(); }
354 iterator_range<use_iterator> materialized_uses() {
355 return make_range(materialized_use_begin(), use_end());
356 }
357 iterator_range<const_use_iterator> materialized_uses() const {
358 return make_range(materialized_use_begin(), use_end());
359 }
360 iterator_range<use_iterator> uses() {
361 assertModuleIsMaterialized();
362 return materialized_uses();
363 }
364 iterator_range<const_use_iterator> uses() const {
365 assertModuleIsMaterialized();
366 return materialized_uses();
367 }
368
369 bool user_empty() const {
370 assertModuleIsMaterialized();
371 return UseList == nullptr;
372 }
373
374 using user_iterator = user_iterator_impl<User>;
375 using const_user_iterator = user_iterator_impl<const User>;
376
377 user_iterator materialized_user_begin() { return user_iterator(UseList); }
378 const_user_iterator materialized_user_begin() const {
379 return const_user_iterator(UseList);
380 }
381 user_iterator user_begin() {
382 assertModuleIsMaterialized();
383 return materialized_user_begin();
384 }
385 const_user_iterator user_begin() const {
386 assertModuleIsMaterialized();
387 return materialized_user_begin();
388 }
389 user_iterator user_end() { return user_iterator(); }
390 const_user_iterator user_end() const { return const_user_iterator(); }
391 User *user_back() {
392 assertModuleIsMaterialized();
393 return *materialized_user_begin();
394 }
395 const User *user_back() const {
396 assertModuleIsMaterialized();
397 return *materialized_user_begin();
398 }
399 iterator_range<user_iterator> materialized_users() {
400 return make_range(materialized_user_begin(), user_end());
401 }
402 iterator_range<const_user_iterator> materialized_users() const {
403 return make_range(materialized_user_begin(), user_end());
404 }
405 iterator_range<user_iterator> users() {
406 assertModuleIsMaterialized();
407 return materialized_users();
408 }
409 iterator_range<const_user_iterator> users() const {
410 assertModuleIsMaterialized();
411 return materialized_users();
412 }
413
414 /// \brief Return true if there is exactly one user of this value.
415 ///
416 /// This is specialized because it is a common request and does not require
417 /// traversing the whole use list.
418 bool hasOneUse() const {
419 const_use_iterator I = use_begin(), E = use_end();
420 if (I == E) return false;
421 return ++I == E;
422 }
423
424 /// \brief Return true if this Value has exactly N users.
425 bool hasNUses(unsigned N) const;
426
427 /// \brief Return true if this value has N users or more.
428 ///
429 /// This is logically equivalent to getNumUses() >= N.
430 bool hasNUsesOrMore(unsigned N) const;
431
432 /// \brief Check if this value is used in the specified basic block.
433 bool isUsedInBasicBlock(const BasicBlock *BB) const;
434
435 /// \brief This method computes the number of uses of this Value.
436 ///
437 /// This is a linear time operation. Use hasOneUse, hasNUses, or
438 /// hasNUsesOrMore to check for specific values.
439 unsigned getNumUses() const;
440
441 /// \brief This method should only be used by the Use class.
442 void addUse(Use &U) { U.addToList(&UseList); }
443
444 /// \brief Concrete subclass of this.
445 ///
446 /// An enumeration for keeping track of the concrete subclass of Value that
447 /// is actually instantiated. Values of this enumeration are kept in the
448 /// Value classes SubclassID field. They are used for concrete type
449 /// identification.
450 enum ValueTy {
451#define HANDLE_VALUE(Name) Name##Val,
452#include "llvm/IR/Value.def"
453
454 // Markers:
455#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
456#include "llvm/IR/Value.def"
457 };
458
459 /// \brief Return an ID for the concrete type of this object.
460 ///
461 /// This is used to implement the classof checks. This should not be used
462 /// for any other purpose, as the values may change as LLVM evolves. Also,
463 /// note that for instructions, the Instruction's opcode is added to
464 /// InstructionVal. So this means three things:
465 /// # there is no value with code InstructionVal (no opcode==0).
466 /// # there are more possible values for the value type than in ValueTy enum.
467 /// # the InstructionVal enumerator must be the highest valued enumerator in
468 /// the ValueTy enum.
469 unsigned getValueID() const {
470 return SubclassID;
471 }
472
473 /// \brief Return the raw optional flags value contained in this value.
474 ///
475 /// This should only be used when testing two Values for equivalence.
476 unsigned getRawSubclassOptionalData() const {
477 return SubclassOptionalData;
478 }
479
480 /// \brief Clear the optional flags contained in this value.
481 void clearSubclassOptionalData() {
482 SubclassOptionalData = 0;
483 }
484
485 /// \brief Check the optional flags for equality.
486 bool hasSameSubclassOptionalData(const Value *V) const {
487 return SubclassOptionalData == V->SubclassOptionalData;
488 }
489
490 /// \brief Return true if there is a value handle associated with this value.
491 bool hasValueHandle() const { return HasValueHandle; }
492
493 /// \brief Return true if there is metadata referencing this value.
494 bool isUsedByMetadata() const { return IsUsedByMD; }
495
496 /// \brief Return true if this value is a swifterror value.
497 ///
498 /// swifterror values can be either a function argument or an alloca with a
499 /// swifterror attribute.
500 bool isSwiftError() const;
501
502 /// \brief Strip off pointer casts, all-zero GEPs, and aliases.
503 ///
504 /// Returns the original uncasted value. If this is called on a non-pointer
505 /// value, it returns 'this'.
506 const Value *stripPointerCasts() const;
507 Value *stripPointerCasts() {
508 return const_cast<Value *>(
509 static_cast<const Value *>(this)->stripPointerCasts());
510 }
511
512 /// \brief Strip off pointer casts, all-zero GEPs, aliases and barriers.
513 ///
514 /// Returns the original uncasted value. If this is called on a non-pointer
515 /// value, it returns 'this'. This function should be used only in
516 /// Alias analysis.
517 const Value *stripPointerCastsAndBarriers() const;
518 Value *stripPointerCastsAndBarriers() {
519 return const_cast<Value *>(
520 static_cast<const Value *>(this)->stripPointerCastsAndBarriers());
521 }
522
523 /// \brief Strip off pointer casts and all-zero GEPs.
524 ///
525 /// Returns the original uncasted value. If this is called on a non-pointer
526 /// value, it returns 'this'.
527 const Value *stripPointerCastsNoFollowAliases() const;
528 Value *stripPointerCastsNoFollowAliases() {
529 return const_cast<Value *>(
530 static_cast<const Value *>(this)->stripPointerCastsNoFollowAliases());
531 }
532
533 /// \brief Strip off pointer casts and all-constant inbounds GEPs.
534 ///
535 /// Returns the original pointer value. If this is called on a non-pointer
536 /// value, it returns 'this'.
537 const Value *stripInBoundsConstantOffsets() const;
538 Value *stripInBoundsConstantOffsets() {
539 return const_cast<Value *>(
540 static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
541 }
542
543 /// \brief Accumulate offsets from \a stripInBoundsConstantOffsets().
544 ///
545 /// Stores the resulting constant offset stripped into the APInt provided.
546 /// The provided APInt will be extended or truncated as needed to be the
547 /// correct bitwidth for an offset of this pointer type.
548 ///
549 /// If this is called on a non-pointer value, it returns 'this'.
550 const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
551 APInt &Offset) const;
552 Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
553 APInt &Offset) {
554 return const_cast<Value *>(static_cast<const Value *>(this)
555 ->stripAndAccumulateInBoundsConstantOffsets(DL, Offset));
556 }
557
558 /// \brief Strip off pointer casts and inbounds GEPs.
559 ///
560 /// Returns the original pointer value. If this is called on a non-pointer
561 /// value, it returns 'this'.
562 const Value *stripInBoundsOffsets() const;
563 Value *stripInBoundsOffsets() {
564 return const_cast<Value *>(
565 static_cast<const Value *>(this)->stripInBoundsOffsets());
566 }
567
568 /// \brief Returns the number of bytes known to be dereferenceable for the
569 /// pointer value.
570 ///
571 /// If CanBeNull is set by this function the pointer can either be null or be
572 /// dereferenceable up to the returned number of bytes.
573 uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
574 bool &CanBeNull) const;
575
576 /// \brief Returns an alignment of the pointer value.
577 ///
578 /// Returns an alignment which is either specified explicitly, e.g. via
579 /// align attribute of a function argument, or guaranteed by DataLayout.
580 unsigned getPointerAlignment(const DataLayout &DL) const;
581
582 /// \brief Translate PHI node to its predecessor from the given basic block.
583 ///
584 /// If this value is a PHI node with CurBB as its parent, return the value in
585 /// the PHI node corresponding to PredBB. If not, return ourself. This is
586 /// useful if you want to know the value something has in a predecessor
587 /// block.
588 const Value *DoPHITranslation(const BasicBlock *CurBB,
589 const BasicBlock *PredBB) const;
590 Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
591 return const_cast<Value *>(
592 static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
593 }
594
595 /// \brief The maximum alignment for instructions.
596 ///
597 /// This is the greatest alignment value supported by load, store, and alloca
598 /// instructions, and global values.
599 static const unsigned MaxAlignmentExponent = 29;
600 static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
601
602 /// \brief Mutate the type of this Value to be of the specified type.
603 ///
604 /// Note that this is an extremely dangerous operation which can create
605 /// completely invalid IR very easily. It is strongly recommended that you
606 /// recreate IR objects with the right types instead of mutating them in
607 /// place.
608 void mutateType(Type *Ty) {
609 VTy = Ty;
610 }
611
612 /// \brief Sort the use-list.
613 ///
614 /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is
615 /// expected to compare two \a Use references.
616 template <class Compare> void sortUseList(Compare Cmp);
617
618 /// \brief Reverse the use-list.
619 void reverseUseList();
620
621private:
622 /// \brief Merge two lists together.
623 ///
624 /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes
625 /// "equal" items from L before items from R.
626 ///
627 /// \return the first element in the list.
628 ///
629 /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
630 template <class Compare>
631 static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
632 Use *Merged;
633 Use **Next = &Merged;
634
635 while (true) {
636 if (!L) {
637 *Next = R;
638 break;
639 }
640 if (!R) {
641 *Next = L;
642 break;
643 }
644 if (Cmp(*R, *L)) {
645 *Next = R;
646 Next = &R->Next;
647 R = R->Next;
648 } else {
649 *Next = L;
650 Next = &L->Next;
651 L = L->Next;
652 }
653 }
654
655 return Merged;
656 }
657
658protected:
659 unsigned short getSubclassDataFromValue() const { return SubclassData; }
660 void setValueSubclassData(unsigned short D) { SubclassData = D; }
661};
662
663struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
664
665/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
666/// Those don't work because Value and Instruction's destructors are protected,
667/// aren't virtual, and won't destroy the complete object.
668using unique_value = std::unique_ptr<Value, ValueDeleter>;
669
670inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
671 V.print(OS);
672 return OS;
673}
674
675void Use::set(Value *V) {
676 if (Val) removeFromList();
677 Val = V;
678 if (V) V->addUse(*this);
679}
680
681Value *Use::operator=(Value *RHS) {
682 set(RHS);
683 return RHS;
684}
685
686const Use &Use::operator=(const Use &RHS) {
687 set(RHS.Val);
688 return *this;
689}
690
691template <class Compare> void Value::sortUseList(Compare Cmp) {
692 if (!UseList || !UseList->Next)
693 // No need to sort 0 or 1 uses.
694 return;
695
696 // Note: this function completely ignores Prev pointers until the end when
697 // they're fixed en masse.
698
699 // Create a binomial vector of sorted lists, visiting uses one at a time and
700 // merging lists as necessary.
701 const unsigned MaxSlots = 32;
702 Use *Slots[MaxSlots];
703
704 // Collect the first use, turning it into a single-item list.
705 Use *Next = UseList->Next;
706 UseList->Next = nullptr;
707 unsigned NumSlots = 1;
708 Slots[0] = UseList;
709
710 // Collect all but the last use.
711 while (Next->Next) {
712 Use *Current = Next;
713 Next = Current->Next;
714
715 // Turn Current into a single-item list.
716 Current->Next = nullptr;
717
718 // Save Current in the first available slot, merging on collisions.
719 unsigned I;
720 for (I = 0; I < NumSlots; ++I) {
721 if (!Slots[I])
722 break;
723
724 // Merge two lists, doubling the size of Current and emptying slot I.
725 //
726 // Since the uses in Slots[I] originally preceded those in Current, send
727 // Slots[I] in as the left parameter to maintain a stable sort.
728 Current = mergeUseLists(Slots[I], Current, Cmp);
729 Slots[I] = nullptr;
730 }
731 // Check if this is a new slot.
732 if (I == NumSlots) {
733 ++NumSlots;
734 assert(NumSlots <= MaxSlots && "Use list bigger than 2^32")(static_cast <bool> (NumSlots <= MaxSlots &&
"Use list bigger than 2^32") ? void (0) : __assert_fail ("NumSlots <= MaxSlots && \"Use list bigger than 2^32\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/Value.h"
, 734, __extension__ __PRETTY_FUNCTION__))
;
735 }
736
737 // Found an open slot.
738 Slots[I] = Current;
739 }
740
741 // Merge all the lists together.
742 assert(Next && "Expected one more Use")(static_cast <bool> (Next && "Expected one more Use"
) ? void (0) : __assert_fail ("Next && \"Expected one more Use\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/Value.h"
, 742, __extension__ __PRETTY_FUNCTION__))
;
743 assert(!Next->Next && "Expected only one Use")(static_cast <bool> (!Next->Next && "Expected only one Use"
) ? void (0) : __assert_fail ("!Next->Next && \"Expected only one Use\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/Value.h"
, 743, __extension__ __PRETTY_FUNCTION__))
;
744 UseList = Next;
745 for (unsigned I = 0; I < NumSlots; ++I)
746 if (Slots[I])
747 // Since the uses in Slots[I] originally preceded those in UseList, send
748 // Slots[I] in as the left parameter to maintain a stable sort.
749 UseList = mergeUseLists(Slots[I], UseList, Cmp);
750
751 // Fix the Prev pointers.
752 for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
753 I->setPrev(Prev);
754 Prev = &I->Next;
755 }
756}
757
758// isa - Provide some specializations of isa so that we don't have to include
759// the subtype header files to test to see if the value is a subclass...
760//
761template <> struct isa_impl<Constant, Value> {
762 static inline bool doit(const Value &Val) {
763 static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
764 return Val.getValueID() <= Value::ConstantLastVal;
25
Calling 'Value::getValueID'
26
Returning from 'Value::getValueID'
27
Assuming the condition is false
72
Calling 'Value::getValueID'
73
Returning from 'Value::getValueID'
74
Assuming the condition is false
118
Calling 'Value::getValueID'
119
Returning from 'Value::getValueID'
120
Assuming the condition is false
765 }
766};
767
768template <> struct isa_impl<ConstantData, Value> {
769 static inline bool doit(const Value &Val) {
770 return Val.getValueID() >= Value::ConstantDataFirstVal &&
771 Val.getValueID() <= Value::ConstantDataLastVal;
772 }
773};
774
775template <> struct isa_impl<ConstantAggregate, Value> {
776 static inline bool doit(const Value &Val) {
777 return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
778 Val.getValueID() <= Value::ConstantAggregateLastVal;
779 }
780};
781
782template <> struct isa_impl<Argument, Value> {
783 static inline bool doit (const Value &Val) {
784 return Val.getValueID() == Value::ArgumentVal;
785 }
786};
787
788template <> struct isa_impl<InlineAsm, Value> {
789 static inline bool doit(const Value &Val) {
790 return Val.getValueID() == Value::InlineAsmVal;
791 }
792};
793
794template <> struct isa_impl<Instruction, Value> {
795 static inline bool doit(const Value &Val) {
796 return Val.getValueID() >= Value::InstructionVal;
797 }
798};
799
800template <> struct isa_impl<BasicBlock, Value> {
801 static inline bool doit(const Value &Val) {
802 return Val.getValueID() == Value::BasicBlockVal;
803 }
804};
805
806template <> struct isa_impl<Function, Value> {
807 static inline bool doit(const Value &Val) {
808 return Val.getValueID() == Value::FunctionVal;
809 }
810};
811
812template <> struct isa_impl<GlobalVariable, Value> {
813 static inline bool doit(const Value &Val) {
814 return Val.getValueID() == Value::GlobalVariableVal;
815 }
816};
817
818template <> struct isa_impl<GlobalAlias, Value> {
819 static inline bool doit(const Value &Val) {
820 return Val.getValueID() == Value::GlobalAliasVal;
821 }
822};
823
824template <> struct isa_impl<GlobalIFunc, Value> {
825 static inline bool doit(const Value &Val) {
826 return Val.getValueID() == Value::GlobalIFuncVal;
827 }
828};
829
830template <> struct isa_impl<GlobalIndirectSymbol, Value> {
831 static inline bool doit(const Value &Val) {
832 return isa<GlobalAlias>(Val) || isa<GlobalIFunc>(Val);
833 }
834};
835
836template <> struct isa_impl<GlobalValue, Value> {
837 static inline bool doit(const Value &Val) {
838 return isa<GlobalObject>(Val) || isa<GlobalIndirectSymbol>(Val);
839 }
840};
841
842template <> struct isa_impl<GlobalObject, Value> {
843 static inline bool doit(const Value &Val) {
844 return isa<GlobalVariable>(Val) || isa<Function>(Val);
845 }
846};
847
848// Create wrappers for C Binding types (see CBindingWrapping.h).
849DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)inline Value *unwrap(LLVMValueRef P) { return reinterpret_cast
<Value*>(P); } inline LLVMValueRef wrap(const Value *P)
{ return reinterpret_cast<LLVMValueRef>(const_cast<
Value*>(P)); } template<typename T> inline T *unwrap
(LLVMValueRef P) { return cast<T>(unwrap(P)); }
850
851// Specialized opaque value conversions.
852inline Value **unwrap(LLVMValueRef *Vals) {
853 return reinterpret_cast<Value**>(Vals);
854}
855
856template<typename T>
857inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
858#ifndef NDEBUG
859 for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
860 unwrap<T>(*I); // For side effect of calling assert on invalid usage.
861#endif
862 (void)Length;
863 return reinterpret_cast<T**>(Vals);
864}
865
866inline LLVMValueRef *wrap(const Value **Vals) {
867 return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
868}
869
870} // end namespace llvm
871
872#endif // LLVM_IR_VALUE_H

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/DebugLoc.h

1//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines a number of light weight data structures used
11// to describe and track debug location information.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_DEBUGLOC_H
16#define LLVM_IR_DEBUGLOC_H
17
18#include "llvm/IR/TrackingMDRef.h"
19#include "llvm/Support/DataTypes.h"
20
21namespace llvm {
22
23 class LLVMContext;
24 class raw_ostream;
25 class DILocation;
26
27 /// \brief A debug info location.
28 ///
29 /// This class is a wrapper around a tracking reference to an \a DILocation
30 /// pointer.
31 ///
32 /// To avoid extra includes, \a DebugLoc doubles the \a DILocation API with a
33 /// one based on relatively opaque \a MDNode pointers.
34 class DebugLoc {
35 TrackingMDNodeRef Loc;
36
37 public:
38 DebugLoc() = default;
39
40 /// \brief Construct from an \a DILocation.
41 DebugLoc(const DILocation *L);
42
43 /// \brief Construct from an \a MDNode.
44 ///
45 /// Note: if \c N is not an \a DILocation, a verifier check will fail, and
46 /// accessors will crash. However, construction from other nodes is
47 /// supported in order to handle forward references when reading textual
48 /// IR.
49 explicit DebugLoc(const MDNode *N);
50
51 /// \brief Get the underlying \a DILocation.
52 ///
53 /// \pre !*this or \c isa<DILocation>(getAsMDNode()).
54 /// @{
55 DILocation *get() const;
56 operator DILocation *() const { return get(); }
57 DILocation *operator->() const { return get(); }
58 DILocation &operator*() const { return *get(); }
59 /// @}
60
61 /// \brief Check for null.
62 ///
63 /// Check for null in a way that is safe with broken debug info. Unlike
64 /// the conversion to \c DILocation, this doesn't require that \c Loc is of
65 /// the right type. Important for cases like \a llvm::StripDebugInfo() and
66 /// \a Instruction::hasMetadata().
67 explicit operator bool() const { return Loc; }
43
Calling 'TypedTrackingMDRef::operator llvm::MDNode *'
48
Returning from 'TypedTrackingMDRef::operator llvm::MDNode *'
89
Calling 'TypedTrackingMDRef::operator llvm::MDNode *'
94
Returning from 'TypedTrackingMDRef::operator llvm::MDNode *'
135
Calling 'TypedTrackingMDRef::operator llvm::MDNode *'
140
Returning from 'TypedTrackingMDRef::operator llvm::MDNode *'
68
69 /// \brief Check whether this has a trivial destructor.
70 bool hasTrivialDestructor() const { return Loc.hasTrivialDestructor(); }
71
72 /// \brief Create a new DebugLoc.
73 ///
74 /// Create a new DebugLoc at the specified line/col and scope/inline. This
75 /// forwards to \a DILocation::get().
76 ///
77 /// If \c !Scope, returns a default-constructed \a DebugLoc.
78 ///
79 /// FIXME: Remove this. Users should use DILocation::get().
80 static DebugLoc get(unsigned Line, unsigned Col, const MDNode *Scope,
81 const MDNode *InlinedAt = nullptr);
82
83 enum { ReplaceLastInlinedAt = true };
84 /// Rebuild the entire inlined-at chain for this instruction so that the top of
85 /// the chain now is inlined-at the new call site.
86 /// \param InlinedAt The new outermost inlined-at in the chain.
87 /// \param ReplaceLast Replace the last location in the inlined-at chain.
88 static DebugLoc appendInlinedAt(DebugLoc DL, DILocation *InlinedAt,
89 LLVMContext &Ctx,
90 DenseMap<const MDNode *, MDNode *> &Cache,
91 bool ReplaceLast = false);
92
93 unsigned getLine() const;
94 unsigned getCol() const;
95 MDNode *getScope() const;
96 DILocation *getInlinedAt() const;
97
98 /// \brief Get the fully inlined-at scope for a DebugLoc.
99 ///
100 /// Gets the inlined-at scope for a DebugLoc.
101 MDNode *getInlinedAtScope() const;
102
103 /// \brief Find the debug info location for the start of the function.
104 ///
105 /// Walk up the scope chain of given debug loc and find line number info
106 /// for the function.
107 ///
108 /// FIXME: Remove this. Users should use DILocation/DILocalScope API to
109 /// find the subprogram, and then DILocation::get().
110 DebugLoc getFnDebugLoc() const;
111
112 /// \brief Return \c this as a bar \a MDNode.
113 MDNode *getAsMDNode() const { return Loc; }
114
115 bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; }
116 bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; }
117
118 void dump() const;
119
120 /// \brief prints source location /path/to/file.exe:line:col @[inlined at]
121 void print(raw_ostream &OS) const;
122 };
123
124} // end namespace llvm
125
126#endif /* LLVM_SUPPORT_DEBUGLOC_H */

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/TrackingMDRef.h

1//===- llvm/IR/TrackingMDRef.h - Tracking Metadata references ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// References to metadata that track RAUW.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_TRACKINGMDREF_H
15#define LLVM_IR_TRACKINGMDREF_H
16
17#include "llvm/IR/Metadata.h"
18#include <algorithm>
19#include <cassert>
20
21namespace llvm {
22
23/// \brief Tracking metadata reference.
24///
25/// This class behaves like \a TrackingVH, but for metadata.
26class TrackingMDRef {
// Non-owning pointer to the tracked metadata; null when unset.
27 Metadata *MD = nullptr;
28
29public:
30 TrackingMDRef() = default;
// Begin tracking MD so this reference follows RAUW of the metadata.
31 explicit TrackingMDRef(Metadata *MD) : MD(MD) { track(); }
32
// Move construction transfers X's tracking registration to this (see retrack).
33 TrackingMDRef(TrackingMDRef &&X) : MD(X.MD) { retrack(X); }
// Copy construction registers an additional, independent tracking entry.
34 TrackingMDRef(const TrackingMDRef &X) : MD(X.MD) { track(); }
35
36 TrackingMDRef &operator=(TrackingMDRef &&X) {
// Self-move guard: retrack() would null out our own MD otherwise.
37 if (&X == this)
38 return *this;
39
40 untrack();
41 MD = X.MD;
42 retrack(X);
43 return *this;
44 }
45
46 TrackingMDRef &operator=(const TrackingMDRef &X) {
// Self-assignment guard: avoids an unnecessary untrack()/track() cycle.
47 if (&X == this)
48 return *this;
49
50 untrack();
51 MD = X.MD;
52 track();
53 return *this;
54 }
55
56 ~TrackingMDRef() { untrack(); }
57
// Raw access to the tracked pointer; may return null.
58 Metadata *get() const { return MD; }
59 operator Metadata *() const { return get(); }
60 Metadata *operator->() const { return get(); }
// NOTE(review): dereferences without a null check — caller must ensure non-null.
61 Metadata &operator*() const { return *get(); }
62
// Drop the reference and stop tracking.
63 void reset() {
64 untrack();
65 MD = nullptr;
66 }
// Retarget the reference: untrack the old value, then track the new one.
67 void reset(Metadata *MD) {
68 untrack();
69 this->MD = MD;
70 track();
71 }
72
73 /// \brief Check whether this has a trivial destructor.
74 ///
75 /// If \c MD isn't replaceable, the destructor will be a no-op.
76 bool hasTrivialDestructor() const {
77 return !MD || !MetadataTracking::isReplaceable(*MD);
78 }
79
// Equality is shallow pointer identity, not metadata value equality.
80 bool operator==(const TrackingMDRef &X) const { return MD == X.MD; }
81 bool operator!=(const TrackingMDRef &X) const { return MD != X.MD; }
82
83private:
// Register MD with MetadataTracking (no-op for a null reference).
84 void track() {
85 if (MD)
86 MetadataTracking::track(MD);
87 }
88
89 void untrack() {
90 if (MD)
91 MetadataTracking::untrack(MD);
92 }
93
// Move the tracking registration from X onto this; X is left null.
// The multi-line body below is the analyzer's preprocessor expansion of the
// original single `assert(MD == X.MD && "Expected values to match");`.
94 void retrack(TrackingMDRef &X) {
95 assert(MD == X.MD && "Expected values to match")(static_cast <bool> (MD == X.MD && "Expected values to match"
) ? void (0) : __assert_fail ("MD == X.MD && \"Expected values to match\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/TrackingMDRef.h"
, 95, __extension__ __PRETTY_FUNCTION__))
;
96 if (X.MD) {
97 MetadataTracking::retrack(X.MD, MD);
98 X.MD = nullptr;
99 }
100 }
101};
102
103/// \brief Typed tracking ref.
104///
105/// Track refererences of a particular type. It's useful to use this for \a
106/// MDNode and \a ValueAsMetadata.
107template <class T> class TypedTrackingMDRef {
// All tracking behavior is delegated to the untyped TrackingMDRef.
108 TrackingMDRef Ref;
109
110public:
111 TypedTrackingMDRef() = default;
// Upcast to Metadata* so the untyped ref can track it.
112 explicit TypedTrackingMDRef(T *MD) : Ref(static_cast<Metadata *>(MD)) {}
113
114 TypedTrackingMDRef(TypedTrackingMDRef &&X) : Ref(std::move(X.Ref)) {}
115 TypedTrackingMDRef(const TypedTrackingMDRef &X) : Ref(X.Ref) {}
116
117 TypedTrackingMDRef &operator=(TypedTrackingMDRef &&X) {
118 Ref = std::move(X.Ref);
119 return *this;
120 }
121
122 TypedTrackingMDRef &operator=(const TypedTrackingMDRef &X) {
123 Ref = X.Ref;
124 return *this;
125 }
126
// Downcast back to T*; caller is trusted that the stored metadata is a T.
// The numbered "Calling/Returning" lines below are clang static-analyzer
// path steps from the report this chunk was extracted from, not source code.
127 T *get() const { return (T *)Ref.get(); }
45
Calling 'TrackingMDRef::get'
46
Returning from 'TrackingMDRef::get'
91
Calling 'TrackingMDRef::get'
92
Returning from 'TrackingMDRef::get'
137
Calling 'TrackingMDRef::get'
138
Returning from 'TrackingMDRef::get'
128 operator T *() const { return get(); }
44
Calling 'TypedTrackingMDRef::get'
47
Returning from 'TypedTrackingMDRef::get'
90
Calling 'TypedTrackingMDRef::get'
93
Returning from 'TypedTrackingMDRef::get'
136
Calling 'TypedTrackingMDRef::get'
139
Returning from 'TypedTrackingMDRef::get'
129 T *operator->() const { return get(); }
// NOTE(review): unchecked dereference — caller must ensure the ref is non-null.
130 T &operator*() const { return *get(); }
131
132 bool operator==(const TypedTrackingMDRef &X) const { return Ref == X.Ref; }
133 bool operator!=(const TypedTrackingMDRef &X) const { return Ref != X.Ref; }
134
135 void reset() { Ref.reset(); }
136 void reset(T *MD) { Ref.reset(static_cast<Metadata *>(MD)); }
137
138 /// \brief Check whether this has a trivial destructor.
139 bool hasTrivialDestructor() const { return Ref.hasTrivialDestructor(); }
140};
141
142using TrackingMDNodeRef = TypedTrackingMDRef<MDNode>;
143using TrackingValueAsMetadataRef = TypedTrackingMDRef<ValueAsMetadata>;
144
145// Expose the underlying metadata to casting.
// Casting hook: lets llvm::cast/dyn_cast look through a TrackingMDRef to the
// underlying Metadata* it holds.
146template <> struct simplify_type<TrackingMDRef> {
147 using SimpleType = Metadata *;
148
149 static SimpleType getSimplifiedValue(TrackingMDRef &MD) { return MD.get(); }
150};
151
// Const overload of the casting hook above; still yields the raw Metadata*.
152template <> struct simplify_type<const TrackingMDRef> {
153 using SimpleType = Metadata *;
154
155 static SimpleType getSimplifiedValue(const TrackingMDRef &MD) {
156 return MD.get();
157 }
158};
159
// Casting hook for the typed wrapper: simplifies to the typed pointer T*.
160template <class T> struct simplify_type<TypedTrackingMDRef<T>> {
161 using SimpleType = T *;
162
163 static SimpleType getSimplifiedValue(TypedTrackingMDRef<T> &MD) {
164 return MD.get();
165 }
166};
167
// Const overload of the typed casting hook; also yields T*.
168template <class T> struct simplify_type<const TypedTrackingMDRef<T>> {
169 using SimpleType = T *;
170
171 static SimpleType getSimplifiedValue(const TypedTrackingMDRef<T> &MD) {
172 return MD.get();
173 }
174};
175
176} // end namespace llvm
177
178#endif // LLVM_IR_TRACKINGMDREF_H