File: build/source/llvm/lib/Target/X86/X86InterleavedAccess.cpp
Warning: line 319, column 11: 1st function call argument is an uninitialized value
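Note: the flagged argument is Temp[2 * i] in the call Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat) at source line 319, inside reorderSubVector. Temp is a local array whose fill loop covers all of the entries later read only for certain VecElems/Stride shapes, which the analyzer apparently cannot prove from the callers.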
1 | //===- X86InterleavedAccess.cpp -------------------------------------------===//
2 | //
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 | // See https://llvm.org/LICENSE.txt for license information.
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 | //
7 | //===----------------------------------------------------------------------===//
8 | //
9 | /// \file
10 | /// This file contains the X86 implementation of the interleaved accesses
11 | /// optimization generating X86-specific instructions/intrinsics for
12 | /// interleaved access groups.
13 | //
14 | //===----------------------------------------------------------------------===//
15 |
16 | #include "X86ISelLowering.h"
17 | #include "X86Subtarget.h"
18 | #include "llvm/ADT/ArrayRef.h"
19 | #include "llvm/ADT/SmallVector.h"
20 | #include "llvm/Analysis/VectorUtils.h"
21 | #include "llvm/IR/Constants.h"
22 | #include "llvm/IR/DataLayout.h"
23 | #include "llvm/IR/DerivedTypes.h"
24 | #include "llvm/IR/IRBuilder.h"
25 | #include "llvm/IR/Instruction.h"
26 | #include "llvm/IR/Instructions.h"
27 | #include "llvm/IR/Module.h"
28 | #include "llvm/IR/Type.h"
29 | #include "llvm/IR/Value.h"
30 | #include "llvm/Support/Casting.h"
31 | #include "llvm/Support/MachineValueType.h"
32 | #include <algorithm>
33 | #include <cassert>
34 | #include <cmath>
35 | #include <cstdint>
36 |
37 | using namespace llvm;
38 |
39 | namespace {
40 |
41 | /// This class holds necessary information to represent an interleaved
42 | /// access group and supports utilities to lower the group into
43 | /// X86-specific instructions/intrinsics.
44 | /// E.g. A group of interleaving access loads (Factor = 2; accessing every
45 | /// other element)
46 | ///   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
47 | ///   %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <0, 2, 4, 6>
48 | ///   %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <1, 3, 5, 7>
49 | class X86InterleavedAccessGroup {
50 |   /// Reference to the wide-load instruction of an interleaved access
51 |   /// group.
52 |   Instruction *const Inst;
53 |
54 |   /// Reference to the shuffle(s), consumer(s) of the (load) 'Inst'.
55 |   ArrayRef<ShuffleVectorInst *> Shuffles;
56 |
57 |   /// Reference to the starting index of each user-shuffle.
58 |   ArrayRef<unsigned> Indices;
59 |
60 |   /// Reference to the interleaving stride in terms of elements.
61 |   const unsigned Factor;
62 |
63 |   /// Reference to the underlying target.
64 |   const X86Subtarget &Subtarget;
65 |
66 |   const DataLayout &DL;
67 |
68 |   IRBuilder<> &Builder;
69 |
70 |   /// Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors
71 |   /// sub vectors of type \p T. Returns the sub-vectors in \p DecomposedVectors.
72 |   void decompose(Instruction *Inst, unsigned NumSubVectors, FixedVectorType *T,
73 |                  SmallVectorImpl<Instruction *> &DecomposedVectors);
74 |
75 |   /// Performs matrix transposition on a 4x4 matrix \p InputVectors and
76 |   /// returns the transposed-vectors in \p TransposedVectors.
77 |   /// E.g.
78 |   /// InputVectors:
79 |   ///   In-V0 = p1, p2, p3, p4
80 |   ///   In-V1 = q1, q2, q3, q4
81 |   ///   In-V2 = r1, r2, r3, r4
82 |   ///   In-V3 = s1, s2, s3, s4
83 |   /// OutputVectors:
84 |   ///   Out-V0 = p1, q1, r1, s1
85 |   ///   Out-V1 = p2, q2, r2, s2
86 |   ///   Out-V2 = p3, q3, r3, s3
87 |   ///   Out-V3 = p4, q4, r4, s4
88 |   void transpose_4x4(ArrayRef<Instruction *> InputVectors,
89 |                      SmallVectorImpl<Value *> &TransposedMatrix);
90 |   void interleave8bitStride4(ArrayRef<Instruction *> InputVectors,
91 |                              SmallVectorImpl<Value *> &TransposedMatrix,
92 |                              unsigned NumSubVecElems);
93 |   void interleave8bitStride4VF8(ArrayRef<Instruction *> InputVectors,
94 |                                 SmallVectorImpl<Value *> &TransposedMatrix);
95 |   void interleave8bitStride3(ArrayRef<Instruction *> InputVectors,
96 |                              SmallVectorImpl<Value *> &TransposedMatrix,
97 |                              unsigned NumSubVecElems);
98 |   void deinterleave8bitStride3(ArrayRef<Instruction *> InputVectors,
99 |                                SmallVectorImpl<Value *> &TransposedMatrix,
100 |                                unsigned NumSubVecElems);
101 |
102 | public:
103 |   /// In order to form an interleaved access group X86InterleavedAccessGroup
104 |   /// requires a wide-load instruction \p 'I', a group of interleaved-vectors
105 |   /// \p Shuffs, reference to the first indices of each interleaved-vector
106 |   /// \p 'Ind' and the interleaving stride factor \p F. In order to generate
107 |   /// X86-specific instructions/intrinsics it also requires the underlying
108 |   /// target information \p STarget.
109 |   explicit X86InterleavedAccessGroup(Instruction *I,
110 |                                      ArrayRef<ShuffleVectorInst *> Shuffs,
111 |                                      ArrayRef<unsigned> Ind, const unsigned F,
112 |                                      const X86Subtarget &STarget,
113 |                                      IRBuilder<> &B)
114 |       : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
115 |         DL(Inst->getModule()->getDataLayout()), Builder(B) {}
116 |
117 |   /// Returns true if this interleaved access group can be lowered into
118 |   /// x86-specific instructions/intrinsics, false otherwise.
119 |   bool isSupported() const;
120 |
121 |   /// Lowers this interleaved access group into X86-specific
122 |   /// instructions/intrinsics.
123 |   bool lowerIntoOptimizedSequence();
124 | };
125 |
126 | } // end anonymous namespace
127 |
128 | bool X86InterleavedAccessGroup::isSupported() const {
129 |   VectorType *ShuffleVecTy = Shuffles[0]->getType();
130 |   Type *ShuffleEltTy = ShuffleVecTy->getElementType();
131 |   unsigned ShuffleElemSize = DL.getTypeSizeInBits(ShuffleEltTy);
132 |   unsigned WideInstSize;
133 |
134 |   // Currently, lowering is supported for the following vectors:
135 |   // Stride 4:
136 |   //   1. Store and load of 4-element vectors of 64 bits on AVX.
137 |   //   2. Store of 16/32-element vectors of 8 bits on AVX.
138 |   // Stride 3:
139 |   //   1. Load of 16/32-element vectors of 8 bits on AVX.
140 |   if (!Subtarget.hasAVX() || (Factor != 4 && Factor != 3))
141 |     return false;
142 |
143 |   if (isa<LoadInst>(Inst)) {
144 |     WideInstSize = DL.getTypeSizeInBits(Inst->getType());
145 |     if (cast<LoadInst>(Inst)->getPointerAddressSpace())
146 |       return false;
147 |   } else
148 |     WideInstSize = DL.getTypeSizeInBits(Shuffles[0]->getType());
149 |
150 |   // The checks below enumerate the supported combinations of element size,
151 |   // total width (WideInstSize) and stride factor.
152 |   if (ShuffleElemSize == 64 && WideInstSize == 1024 && Factor == 4)
153 |     return true;
154 |
155 |   if (ShuffleElemSize == 8 && isa<StoreInst>(Inst) && Factor == 4 &&
156 |       (WideInstSize == 256 || WideInstSize == 512 || WideInstSize == 1024 ||
157 |        WideInstSize == 2048))
158 |     return true;
159 |
160 |   if (ShuffleElemSize == 8 && Factor == 3 &&
161 |       (WideInstSize == 384 || WideInstSize == 768 || WideInstSize == 1536))
162 |     return true;
163 |
164 |   return false;
165 | }
166 |
167 | void X86InterleavedAccessGroup::decompose(
168 |     Instruction *VecInst, unsigned NumSubVectors, FixedVectorType *SubVecTy,
169 |     SmallVectorImpl<Instruction *> &DecomposedVectors) {
170 |   assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&
171 |          "Expected Load or Shuffle");
172 |
173 |   Type *VecWidth = VecInst->getType();
174 |   (void)VecWidth;
175 |   assert(VecWidth->isVectorTy() &&
176 |          DL.getTypeSizeInBits(VecWidth) >=
177 |              DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
178 |          "Invalid Inst-size!!!");
179 |
180 |   if (auto *SVI = dyn_cast<ShuffleVectorInst>(VecInst)) {
181 |     Value *Op0 = SVI->getOperand(0);
182 |     Value *Op1 = SVI->getOperand(1);
183 |
184 |     // Generate N(= NumSubVectors) shuffles of T(= SubVecTy) type.
185 |     for (unsigned i = 0; i < NumSubVectors; ++i)
186 |       DecomposedVectors.push_back(
187 |           cast<ShuffleVectorInst>(Builder.CreateShuffleVector(
188 |               Op0, Op1,
189 |               createSequentialMask(Indices[i], SubVecTy->getNumElements(),
190 |                                    0))));
191 |     return;
192 |   }
193 |
194 |   // Decompose the load instruction.
195 |   LoadInst *LI = cast<LoadInst>(VecInst);
196 |   Type *VecBaseTy, *VecBasePtrTy;
197 |   Value *VecBasePtr;
198 |   unsigned int NumLoads = NumSubVectors;
199 |   // In the case of stride 3 with a vector of 32 elements, the data is loaded
200 |   // in the following way:
201 |   // [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
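    | // A sketch of the arithmetic below (assuming a stride-3 wide load of
    | // <96 x i8>, i.e. VecLength == 768): VecBaseTy becomes <16 x i8> and
    | // NumLoads = NumSubVectors * (768 / 384) = 3 * 2 = 6 sub-loads.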
202 |   unsigned VecLength = DL.getTypeSizeInBits(VecWidth);
203 |   if (VecLength == 768 || VecLength == 1536) {
204 |     VecBaseTy = FixedVectorType::get(Type::getInt8Ty(LI->getContext()), 16);
205 |     VecBasePtrTy = VecBaseTy->getPointerTo(LI->getPointerAddressSpace());
206 |     VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
207 |     NumLoads = NumSubVectors * (VecLength / 384);
208 |   } else {
209 |     VecBaseTy = SubVecTy;
210 |     VecBasePtrTy = VecBaseTy->getPointerTo(LI->getPointerAddressSpace());
211 |     VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
212 |   }
213 |   // Generate N loads of T type.
214 |   assert(VecBaseTy->getPrimitiveSizeInBits().isKnownMultipleOf(8) &&
215 |          "VecBaseTy's size must be a multiple of 8");
216 |   const Align FirstAlignment = LI->getAlign();
217 |   const Align SubsequentAlignment = commonAlignment(
218 |       FirstAlignment, VecBaseTy->getPrimitiveSizeInBits().getFixedValue() / 8);
219 |   Align Alignment = FirstAlignment;
220 |   for (unsigned i = 0; i < NumLoads; i++) {
221 |     // TODO: Support inbounds GEP.
222 |     Value *NewBasePtr =
223 |         Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
224 |     Instruction *NewLoad =
225 |         Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, Alignment);
226 |     DecomposedVectors.push_back(NewLoad);
227 |     Alignment = SubsequentAlignment;
228 |   }
229 | }
230 |
231 | // Changes the scale of the vector type: halves the number of elements and
232 | // doubles the scalar size.
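    | // For instance (a sketch for the i8 inputs the callers use):
    | // v32i8 -> v16i16, and v16i8 -> v8i16.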
233 | static MVT scaleVectorType(MVT VT) {
234 |   unsigned ScalarSize = VT.getVectorElementType().getScalarSizeInBits() * 2;
235 |   return MVT::getVectorVT(MVT::getIntegerVT(ScalarSize),
236 |                           VT.getVectorNumElements() / 2);
237 | }
238 |
239 | static constexpr int Concat[] = {
240 |     0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
241 |     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
242 |     32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
243 |     48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
244 |
245 | // genShuffleBland - Creates a blend shuffle mask from two vectors. This
246 | // function only works on instructions whose lanes sit inside 256-bit
247 | // registers. According to the mask 'Mask' it creates a new mask 'Out' by
248 | // adding an offset. The offset amount depends on the two integers
249 | // 'LowOffset' and 'HighOffset', where 'LowOffset' applies to the first
250 | // vector and 'HighOffset' applies to the second vector.
251 | // |a0....a5,b0....b4,c0....c4|a16..a21,b16..b20,c16..c20|
252 | // |c5...c10,a5....a9,b5....b9|c21..c26,a22..a26,b21..b25|
253 | // |b10..b15,c11..c15,a10..a15|b26..b31,c27..c31,a27..a31|
254 | // For the sequence to work as a mirror of the load, we must consider the
255 | // element order as above.
256 | // This function combines two kinds of shuffles: the first is a per-lane
257 | // shuffle (vpshufb-style) and the second is a "blend"-type shuffle. By
258 | // computing the shuffle on a sequence of 16 elements (one lane) and adding
259 | // the correct offset, we create a per-lane shuffle + blend sequence between
260 | // two shuffles.
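    | // A worked example with hypothetical values (not from a real caller):
    | // for NumOfElm = 32 (e.g. VT = v32i8), Mask = {0, 3, 6}, LowOffset = 0
    | // and HighOffset = 16, 'Out' becomes {0, 3, 6, 48, 51, 54} - the first
    | // copy is offset by LowOffset, the second by HighOffset + NumOfElm.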
261 | static void genShuffleBland(MVT VT, ArrayRef<int> Mask,
262 |                             SmallVectorImpl<int> &Out, int LowOffset,
263 |                             int HighOffset) {
264 |   assert(VT.getSizeInBits() >= 256 &&
265 |          "This function doesn't accept width smaller than 256");
266 |   unsigned NumOfElm = VT.getVectorNumElements();
267 |   for (int I : Mask)
268 |     Out.push_back(I + LowOffset);
269 |   for (int I : Mask)
270 |     Out.push_back(I + HighOffset + NumOfElm);
271 | }
272 |
273 | // reorderSubVector returns the data to its original state, and is de facto
274 | // the opposite of the function concatSubVector.
275 |
276 | // For VecElems = 16
277 | //   Invec[0] - |0|      TransposedMatrix[0] - |0|
278 | //   Invec[1] - |1| =>   TransposedMatrix[1] - |1|
279 | //   Invec[2] - |2|      TransposedMatrix[2] - |2|
280 |
281 | // For VecElems = 32
282 | //   Invec[0] - |0|3|    TransposedMatrix[0] - |0|1|
283 | //   Invec[1] - |1|4| => TransposedMatrix[1] - |2|3|
284 | //   Invec[2] - |2|5|    TransposedMatrix[2] - |4|5|
285 |
286 | // For VecElems = 64
287 | //   Invec[0] - |0|3|6|9 |    TransposedMatrix[0] - |0|1|2 |3 |
288 | //   Invec[1] - |1|4|7|10| => TransposedMatrix[1] - |4|5|6 |7 |
289 | //   Invec[2] - |2|5|8|11|    TransposedMatrix[2] - |8|9|10|11|
290 |
291 | static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
292 |                              ArrayRef<Value *> Vec, ArrayRef<int> VPShuf,
293 |                              unsigned VecElems, unsigned Stride,
294 |                              IRBuilder<> &Builder) {
295 |
296 |   if (VecElems == 16) {
297 |     for (unsigned i = 0; i < Stride; i++)
298 |       TransposedMatrix[i] = Builder.CreateShuffleVector(Vec[i], VPShuf);
299 |     return;
300 |   }
301 |
302 |   SmallVector<int, 32> OptimizeShuf;
303 |   Value *Temp[8];
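    | // Note on the analyzer report above: the loop below writes Temp[i / 2]
    | // only for i < (VecElems / 16) * Stride, while the tail loop reads
    | // Temp[2 * i] and Temp[2 * i + 1] for i < Stride. Those reads are fully
    | // covered only when VecElems == 64 (the VecElems == 32 path returns
    | // early); the analyzer evidently cannot rule out other shapes, hence
    | // the uninitialized-value warning at line 319.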
304 |
305 |   for (unsigned i = 0; i < (VecElems / 16) * Stride; i += 2) {
306 |     genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,
307 |                     (i + 1) / Stride * 16);
308 |     Temp[i / 2] = Builder.CreateShuffleVector(
309 |         Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
310 |     OptimizeShuf.clear();
311 |   }
312 |
313 |   if (VecElems == 32) {
314 |     std::copy(Temp, Temp + Stride, TransposedMatrix.begin());
315 |     return;
316 |   } else
317 |     for (unsigned i = 0; i < Stride; i++)
318 |       TransposedMatrix[i] =
319 |           Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
320 | }
321 |
322 | void X86InterleavedAccessGroup::interleave8bitStride4VF8(
323 |     ArrayRef<Instruction *> Matrix,
324 |     SmallVectorImpl<Value *> &TransposedMatrix) {
325 |   // Assuming we start from the following vectors:
326 |   // Matrix[0]= c0 c1 c2 c3 c4 ... c7
327 |   // Matrix[1]= m0 m1 m2 m3 m4 ... m7
328 |   // Matrix[2]= y0 y1 y2 y3 y4 ... y7
329 |   // Matrix[3]= k0 k1 k2 k3 k4 ... k7
330 |
331 |   MVT VT = MVT::v8i16;
332 |   TransposedMatrix.resize(2);
333 |   SmallVector<int, 16> MaskLow;
334 |   SmallVector<int, 32> MaskLowTemp1, MaskLowWord;
335 |   SmallVector<int, 32> MaskHighTemp1, MaskHighWord;
336 |
337 |   for (unsigned i = 0; i < 8; ++i) {
338 |     MaskLow.push_back(i);
339 |     MaskLow.push_back(i + 8);
340 |   }
341 |
342 |   createUnpackShuffleMask(VT, MaskLowTemp1, true, false);
343 |   createUnpackShuffleMask(VT, MaskHighTemp1, false, false);
344 |   narrowShuffleMaskElts(2, MaskHighTemp1, MaskHighWord);
345 |   narrowShuffleMaskElts(2, MaskLowTemp1, MaskLowWord);
346 |   // IntrVec1Low = c0 m0 c1 m1 c2 m2 c3 m3 c4 m4 c5 m5 c6 m6 c7 m7
347 |   // IntrVec2Low = y0 k0 y1 k1 y2 k2 y3 k3 y4 k4 y5 k5 y6 k6 y7 k7
348 |   Value *IntrVec1Low =
349 |       Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
350 |   Value *IntrVec2Low =
351 |       Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);
352 |
353 |   // TransposedMatrix[0] = c0 m0 y0 k0 c1 m1 y1 k1 c2 m2 y2 k2 c3 m3 y3 k3
354 |   // TransposedMatrix[1] = c4 m4 y4 k4 c5 m5 y5 k5 c6 m6 y6 k6 c7 m7 y7 k7
355 |
356 |   TransposedMatrix[0] =
357 |       Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low, MaskLowWord);
358 |   TransposedMatrix[1] =
359 |       Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low, MaskHighWord);
360 | }
361 |
362 | void X86InterleavedAccessGroup::interleave8bitStride4(
363 |     ArrayRef<Instruction *> Matrix, SmallVectorImpl<Value *> &TransposedMatrix,
364 |     unsigned NumOfElm) {
365 |   // Example: Assuming we start from the following vectors:
366 |   // Matrix[0]= c0 c1 c2 c3 c4 ... c31
367 |   // Matrix[1]= m0 m1 m2 m3 m4 ... m31
368 |   // Matrix[2]= y0 y1 y2 y3 y4 ... y31
369 |   // Matrix[3]= k0 k1 k2 k3 k4 ... k31
370 |
371 |   MVT VT = MVT::getVectorVT(MVT::i8, NumOfElm);
372 |   MVT HalfVT = scaleVectorType(VT);
373 |
374 |   TransposedMatrix.resize(4);
375 |   SmallVector<int, 32> MaskHigh;
376 |   SmallVector<int, 32> MaskLow;
377 |   SmallVector<int, 32> LowHighMask[2];
378 |   SmallVector<int, 32> MaskHighTemp;
379 |   SmallVector<int, 32> MaskLowTemp;
380 |
381 |   // MaskLow and MaskHigh are built to match the vpunpcklbw and vpunpckhbw
382 |   // X86 shuffle patterns.
383 |
384 |   createUnpackShuffleMask(VT, MaskLow, true, false);
385 |   createUnpackShuffleMask(VT, MaskHigh, false, false);
386 |
387 |   // MaskLowTemp and MaskHighTemp are built to match the vpunpckldw and
388 |   // vpunpckhdw X86 shuffle patterns.
389 |
390 |   createUnpackShuffleMask(HalfVT, MaskLowTemp, true, false);
391 |   createUnpackShuffleMask(HalfVT, MaskHighTemp, false, false);
392 |   narrowShuffleMaskElts(2, MaskLowTemp, LowHighMask[0]);
393 |   narrowShuffleMaskElts(2, MaskHighTemp, LowHighMask[1]);
394 |
395 |   // IntrVec1Low  = c0 m0 c1 m1 ... c7 m7   | c16 m16 c17 m17 ... c23 m23
396 |   // IntrVec1High = c8 m8 c9 m9 ... c15 m15 | c24 m24 c25 m25 ... c31 m31
397 |   // IntrVec2Low  = y0 k0 y1 k1 ... y7 k7   | y16 k16 y17 k17 ... y23 k23
398 |   // IntrVec2High = y8 k8 y9 k9 ... y15 k15 | y24 k24 y25 k25 ... y31 k31
399 |   Value *IntrVec[4];
400 |
401 |   IntrVec[0] = Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
402 |   IntrVec[1] = Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskHigh);
403 |   IntrVec[2] = Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);
404 |   IntrVec[3] = Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskHigh);
405 |
406 |   // cmyk4  cmyk5  cmyk6  cmyk7  | cmyk20 cmyk21 cmyk22 cmyk23
407 |   // cmyk12 cmyk13 cmyk14 cmyk15 | cmyk28 cmyk29 cmyk30 cmyk31
408 |   // cmyk0  cmyk1  cmyk2  cmyk3  | cmyk16 cmyk17 cmyk18 cmyk19
409 |   // cmyk8  cmyk9  cmyk10 cmyk11 | cmyk24 cmyk25 cmyk26 cmyk27
410 |
411 |   Value *VecOut[4];
412 |   for (int i = 0; i < 4; i++)
413 |     VecOut[i] = Builder.CreateShuffleVector(IntrVec[i / 2], IntrVec[i / 2 + 2],
414 |                                             LowHighMask[i % 2]);
415 |
416 |   // cmyk0  cmyk1  cmyk2  cmyk3  | cmyk4  cmyk5  cmyk6  cmyk7
417 |   // cmyk8  cmyk9  cmyk10 cmyk11 | cmyk12 cmyk13 cmyk14 cmyk15
418 |   // cmyk16 cmyk17 cmyk18 cmyk19 | cmyk20 cmyk21 cmyk22 cmyk23
419 |   // cmyk24 cmyk25 cmyk26 cmyk27 | cmyk28 cmyk29 cmyk30 cmyk31
420 |
421 |   if (VT == MVT::v16i8) {
422 |     std::copy(VecOut, VecOut + 4, TransposedMatrix.begin());
423 |     return;
424 |   }
425 |
426 |   reorderSubVector(VT, TransposedMatrix, VecOut, ArrayRef(Concat, 16), NumOfElm,
427 |                    4, Builder);
428 | }
429 |
430 | // createShuffleStride returns a shuffle mask of size N.
431 | // The shuffle pattern is as follows:
432 | // {0, Stride%(VF/Lane), (2*Stride%(VF/Lane))...(VF*Stride/Lane)%(VF/Lane),
433 | //  (VF/Lane), (VF/Lane)+Stride%(VF/Lane),...,
434 | //  (VF/Lane)+(VF*Stride/Lane)%(VF/Lane)}
435 | // Where Lane is the # of lanes in a register:
436 | //   VectorSize = 128 => Lane = 1
437 | //   VectorSize = 256 => Lane = 2
438 | // For example, the shuffle pattern for VF 16 with register size 256
439 | // (lanes = 2) is {<[0|3|6|1|4|7|2|5]-[8|11|14|9|12|15|10|13]>}.
440 | static void createShuffleStride(MVT VT, int Stride,
441 |                                 SmallVectorImpl<int> &Mask) {
442 |   int VectorSize = VT.getSizeInBits();
443 |   int VF = VT.getVectorNumElements();
444 |   int LaneCount = std::max(VectorSize / 128, 1);
445 |   for (int Lane = 0; Lane < LaneCount; Lane++)
446 |     for (int i = 0, LaneSize = VF / LaneCount; i != LaneSize; ++i)
447 |       Mask.push_back((i * Stride) % LaneSize + LaneSize * Lane);
448 | }
449 |
450 | // setGroupSize sets 'SizeInfo' to the size (number of elements) of each
451 | // group inside a shuffle mask. A mask contains exactly 3 groups, where
452 | // each group is a monotonically increasing sequence with stride 3.
453 | // For example, shuffleMask {0,3,6,1,4,7,2,5} => {3,3,2}
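    | // Tracing that example (VF = 8 per lane): ceil(8/3) = 3, then
    | // ceil((8-1)/3) = 3, then ceil((8-2)/3) = 2, which gives {3,3,2}.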
454 | static void setGroupSize(MVT VT, SmallVectorImpl<int> &SizeInfo) {
455 |   int VectorSize = VT.getSizeInBits();
456 |   int VF = VT.getVectorNumElements() / std::max(VectorSize / 128, 1);
457 |   for (int i = 0, FirstGroupElement = 0; i < 3; i++) {
458 |     int GroupSize = std::ceil((VF - FirstGroupElement) / 3.0);
459 |     SizeInfo.push_back(GroupSize);
460 |     FirstGroupElement = ((GroupSize)*3 + FirstGroupElement) % VF;
461 |   }
462 | }
463 |
464 | // DecodePALIGNRMask returns the shuffle mask of the vpalignr instruction.
465 | // vpalignr works per lane,
466 | // where Lane is the # of lanes in a register:
467 | //   VectorWide = 128 => Lane = 1
468 | //   VectorWide = 256 => Lane = 2
469 | // For Lane = 1 the shuffle pattern is: {DiffToJump,...,DiffToJump+VF-1}.
470 | // For Lane = 2 the shuffle pattern is:
471 | //   {DiffToJump,...,VF/2-1,VF,...,DiffToJump+VF-1}.
472 | // The Imm variable sets the offset amount. The result of the function is
473 | // stored in the ShuffleMask vector and is built as described at the
474 | // beginning of this comment. AlignDirection is a boolean that indicates
475 | // the direction of the alignment (false - align to the "right" side,
476 | // true - align to the "left" side).
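    | // A worked example with hypothetical values: for VT = v16i8 (one lane),
    | // Imm = 3, AlignDirection = true and Unary = true, Offset becomes 3 and
    | // the mask is {3,4,...,15,0,1,2} - a rotation of the single source by
    | // three bytes.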
477 | static void DecodePALIGNRMask(MVT VT, unsigned Imm,
478 |                               SmallVectorImpl<int> &ShuffleMask,
479 |                               bool AlignDirection = true, bool Unary = false) {
480 |   unsigned NumElts = VT.getVectorNumElements();
481 |   unsigned NumLanes = std::max((int)VT.getSizeInBits() / 128, 1);
482 |   unsigned NumLaneElts = NumElts / NumLanes;
483 |
484 |   Imm = AlignDirection ? Imm : (NumLaneElts - Imm);
485 |   unsigned Offset = Imm * (VT.getScalarSizeInBits() / 8);
486 |
487 |   for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
488 |     for (unsigned i = 0; i != NumLaneElts; ++i) {
489 |       unsigned Base = i + Offset;
490 |       // If i+offset is out of this lane then we actually need the other source.
491 |       // If Unary, the other source is the first source.
492 |       if (Base >= NumLaneElts)
493 |         Base = Unary ? Base % NumLaneElts : Base + NumElts - NumLaneElts;
494 |       ShuffleMask.push_back(Base + l);
495 |     }
496 |   }
497 | }
498 |
499 | // concatSubVector - The function rebuilds the data to the correct expected
500 | // order. An assumption about the shape of the matrix was made for the
501 | // deinterleave to work with lane instructions such as 'vpalignr' or
502 | // 'vpshufb'. This function ensures that the data is built in the correct
503 | // way for the lane instructions. Each lane inside the vector is 128 bits
504 | // long.
505 | //
506 | // The 'InVec' argument contains the data in increasing order: InVec[0]
507 | // holds the first 128 bits of data. The number of lanes inside a vector
508 | // depends on 'VecElems'; in general, the formula is VecElems * ElemSize /
509 | // 128. The size of the array 'InVec' is equal to 'VecElems'.
510 |
511 | // For VecElems = 16
512 | //   Invec[0] - |0|      Vec[0] - |0|
513 | //   Invec[1] - |1| =>   Vec[1] - |1|
514 | //   Invec[2] - |2|      Vec[2] - |2|
515 |
516 | // For VecElems = 32
517 | //   Invec[0] - |0|1|    Vec[0] - |0|3|
518 | //   Invec[1] - |2|3| => Vec[1] - |1|4|
519 | //   Invec[2] - |4|5|    Vec[2] - |2|5|
520 |
521 | // For VecElems = 64
522 | //   Invec[0] - |0|1|2 |3 |    Vec[0] - |0|3|6|9 |
523 | //   Invec[1] - |4|5|6 |7 | => Vec[1] - |1|4|7|10|
524 | //   Invec[2] - |8|9|10|11|    Vec[2] - |2|5|8|11|
525 |
526 | static void concatSubVector(Value **Vec, ArrayRef<Instruction *> InVec,
527 |                             unsigned VecElems, IRBuilder<> &Builder) {
528 |   if (VecElems == 16) {
529 |     for (int i = 0; i < 3; i++)
530 |       Vec[i] = InVec[i];
531 |     return;
532 |   }
533 |
534 |   for (unsigned j = 0; j < VecElems / 32; j++)
535 |     for (int i = 0; i < 3; i++)
536 |       Vec[i + j * 3] = Builder.CreateShuffleVector(
537 |           InVec[j * 6 + i], InVec[j * 6 + i + 3], ArrayRef(Concat, 32));
538 |
539 |   if (VecElems == 32)
540 |     return;
541 |
542 |   for (int i = 0; i < 3; i++)
543 |     Vec[i] = Builder.CreateShuffleVector(Vec[i], Vec[i + 3], Concat);
544 | }
545 |
546 | void X86InterleavedAccessGroup::deinterleave8bitStride3(
547 |     ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
548 |     unsigned VecElems) {
549 |   // Example: Assuming we start from the following vectors:
550 |   // Matrix[0]= a0 b0 c0 a1 b1 c1 a2 b2
551 |   // Matrix[1]= c2 a3 b3 c3 a4 b4 c4 a5
552 |   // Matrix[2]= b5 c5 a6 b6 c6 a7 b7 c7
553 |
554 |   TransposedMatrix.resize(3);
555 |   SmallVector<int, 32> VPShuf;
556 |   SmallVector<int, 32> VPAlign[2];
557 |   SmallVector<int, 32> VPAlign2;
558 |   SmallVector<int, 32> VPAlign3;
559 |   SmallVector<int, 3> GroupSize;
560 |   Value *Vec[6], *TempVector[3];
561 |
562 |   MVT VT = MVT::getVT(Shuffles[0]->getType());
563 |
564 |   createShuffleStride(VT, 3, VPShuf);
565 |   setGroupSize(VT, GroupSize);
566 |
567 |   for (int i = 0; i < 2; i++)
568 |     DecodePALIGNRMask(VT, GroupSize[2 - i], VPAlign[i], false);
569 |
570 |   DecodePALIGNRMask(VT, GroupSize[2] + GroupSize[1], VPAlign2, true, true);
571 |   DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, true, true);
572 |
573 |   concatSubVector(Vec, InVec, VecElems, Builder);
574 |   // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
575 |   // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
576 |   // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
577 |
578 |   for (int i = 0; i < 3; i++)
579 |     Vec[i] = Builder.CreateShuffleVector(Vec[i], VPShuf);
580 |
581 |   // TempVector[0]= a6 a7 a0 a1 a2 b0 b1 b2
582 |   // TempVector[1]= c0 c1 c2 c3 c4 a3 a4 a5
583 |   // TempVector[2]= b3 b4 b5 b6 b7 c5 c6 c7
584 |
585 |   for (int i = 0; i < 3; i++)
586 |     TempVector[i] =
587 |         Builder.CreateShuffleVector(Vec[(i + 2) % 3], Vec[i], VPAlign[0]);
588 |
589 |   // Vec[0]= a3 a4 a5 a6 a7 a0 a1 a2
590 |   // Vec[1]= c5 c6 c7 c0 c1 c2 c3 c4
591 |   // Vec[2]= b0 b1 b2 b3 b4 b5 b6 b7
592 |
593 |   for (int i = 0; i < 3; i++)
594 |     Vec[i] = Builder.CreateShuffleVector(TempVector[(i + 1) % 3], TempVector[i],
595 |                                          VPAlign[1]);
596 |
597 |   // TransposedMatrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
598 |   // TransposedMatrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
599 |   // TransposedMatrix[2]= c0 c1 c2 c3 c4 c5 c6 c7
600 |
601 |   Value *TempVec = Builder.CreateShuffleVector(Vec[1], VPAlign3);
602 |   TransposedMatrix[0] = Builder.CreateShuffleVector(Vec[0], VPAlign2);
603 |   TransposedMatrix[1] = VecElems == 8 ? Vec[2] : TempVec;
604 |   TransposedMatrix[2] = VecElems == 8 ? TempVec : Vec[2];
605 | }
606 |
607 | // group2Shuffle reorders the shuffle stride back into contiguous order.
608 | // For example, for VF16 with Mask1 = {0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13} =>
609 | // MaskResult = {0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5}.
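    | // Tracing the loops below for that VF16 case (Lane = 1), with the
    | // GroupSize mask {6,5,5} that setGroupSize produces: IndexGroup ends up
    | // as {0, 11, 6}, and cycling i % 3 over it yields exactly the
    | // MaskResult above.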
610 | static void group2Shuffle(MVT VT, SmallVectorImpl<int> &Mask,
611 |                           SmallVectorImpl<int> &Output) {
612 |   int IndexGroup[3] = {0, 0, 0};
613 |   int Index = 0;
614 |   int VectorWidth = VT.getSizeInBits();
615 |   int VF = VT.getVectorNumElements();
616 |   // Find the index of the different groups.
617 |   int Lane = (VectorWidth / 128 > 0) ? VectorWidth / 128 : 1;
618 |   for (int i = 0; i < 3; i++) {
619 |     IndexGroup[(Index * 3) % (VF / Lane)] = Index;
620 |     Index += Mask[i];
621 |   }
622 |   // According to the index compute the convert mask.
623 |   for (int i = 0; i < VF / Lane; i++) {
624 |     Output.push_back(IndexGroup[i % 3]);
625 |     IndexGroup[i % 3]++;
626 |   }
627 | }
628 |
629 | void X86InterleavedAccessGroup::interleave8bitStride3(
630 |     ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
631 |     unsigned VecElems) {
632 |   // Example: Assuming we start from the following vectors:
633 |   // Matrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
634 |   // Matrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
635 |   // Matrix[2]= c0 c1 c2 c3 c4 c5 c6 c7
636 |
637 |   TransposedMatrix.resize(3);
638 |   SmallVector<int, 3> GroupSize;
639 |   SmallVector<int, 32> VPShuf;
640 |   SmallVector<int, 32> VPAlign[3];
641 |   SmallVector<int, 32> VPAlign2;
642 |   SmallVector<int, 32> VPAlign3;
643 |
644 |   Value *Vec[3], *TempVector[3];
645 |   MVT VT = MVT::getVectorVT(MVT::i8, VecElems);
646 |
647 |   setGroupSize(VT, GroupSize);
648 |
649 |   for (int i = 0; i < 3; i++)
650 |     DecodePALIGNRMask(VT, GroupSize[i], VPAlign[i]);
651 |
652 |   DecodePALIGNRMask(VT, GroupSize[1] + GroupSize[2], VPAlign2, false, true);
653 |   DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, false, true);
654 |
655 |   // Vec[0]= a3 a4 a5 a6 a7 a0 a1 a2
656 |   // Vec[1]= c5 c6 c7 c0 c1 c2 c3 c4
657 |   // Vec[2]= b0 b1 b2 b3 b4 b5 b6 b7
658 |
659 |   Vec[0] = Builder.CreateShuffleVector(InVec[0], VPAlign2);
660 |   Vec[1] = Builder.CreateShuffleVector(InVec[1], VPAlign3);
661 |   Vec[2] = InVec[2];
662 |
663 |   // Vec[0]= a6 a7 a0 a1 a2 b0 b1 b2
664 |   // Vec[1]= c0 c1 c2 c3 c4 a3 a4 a5
665 |   // Vec[2]= b3 b4 b5 b6 b7 c5 c6 c7
666 |
667 |   for (int i = 0; i < 3; i++)
668 |     TempVector[i] =
669 |         Builder.CreateShuffleVector(Vec[i], Vec[(i + 2) % 3], VPAlign[1]);
670 |
671 |   // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
672 |   // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
673 |   // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
674 |
675 |   for (int i = 0; i < 3; i++)
676 |     Vec[i] = Builder.CreateShuffleVector(TempVector[i], TempVector[(i + 1) % 3],
677 |                                          VPAlign[2]);
678 |
679 |   // TransposedMatrix[0] = a0 b0 c0 a1 b1 c1 a2 b2
680 |   // TransposedMatrix[1] = c2 a3 b3 c3 a4 b4 c4 a5
681 |   // TransposedMatrix[2] = b5 c5 a6 b6 c6 a7 b7 c7
682 |
683 |   unsigned NumOfElm = VT.getVectorNumElements();
684 |   group2Shuffle(VT, GroupSize, VPShuf);
685 |   reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm, 3, Builder);
686 | }
687 |
688 | void X86InterleavedAccessGroup::transpose_4x4(
689 |     ArrayRef<Instruction *> Matrix,
690 |     SmallVectorImpl<Value *> &TransposedMatrix) {
691 |   assert(Matrix.size() == 4 && "Invalid matrix size");
692 |   TransposedMatrix.resize(4);
693 |
694 |   // dst = src1[0,1],src2[0,1]
695 |   static constexpr int IntMask1[] = {0, 1, 4, 5};
696 |   ArrayRef<int> Mask = ArrayRef(IntMask1, 4);
697 |   Value *IntrVec1 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
698 |   Value *IntrVec2 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
699 |
700 |   // dst = src1[2,3],src2[2,3]
701 |   static constexpr int IntMask2[] = {2, 3, 6, 7};
702 |   Mask = ArrayRef(IntMask2, 4);
703 |   Value *IntrVec3 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
704 |   Value *IntrVec4 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
705 |
706 |   // dst = src1[0],src2[0],src1[2],src2[2]
707 |   static constexpr int IntMask3[] = {0, 4, 2, 6};
708 |   Mask = ArrayRef(IntMask3, 4);
709 |   TransposedMatrix[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
710 |   TransposedMatrix[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
711 |
712 |   // dst = src1[1],src2[1],src1[3],src2[3]
713 |   static constexpr int IntMask4[] = {1, 5, 3, 7};
714 |   Mask = ArrayRef(IntMask4, 4);
715 |   TransposedMatrix[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
716 |   TransposedMatrix[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
717 | }
718 |
719 | // Lowers this interleaved access group into X86-specific
720 | // instructions/intrinsics.
721 | bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
722 |   SmallVector<Instruction *, 4> DecomposedVectors;
723 |   SmallVector<Value *, 4> TransposedVectors;
724 |   auto *ShuffleTy = cast<FixedVectorType>(Shuffles[0]->getType());
725 |
726 |   if (isa<LoadInst>(Inst)) {
727 |     auto *ShuffleEltTy = cast<FixedVectorType>(Inst->getType());
728 |     unsigned NumSubVecElems = ShuffleEltTy->getNumElements() / Factor;
729 |     switch (NumSubVecElems) {
730 |     default:
731 |       return false;
732 |     case 4:
733 |     case 8:
734 |     case 16:
735 |     case 32:
736 |     case 64:
737 |       if (ShuffleTy->getNumElements() != NumSubVecElems)
738 |         return false;
739 |       break;
740 |     }
741 |
742 |     // Try to generate target-sized register(/instruction).
743 |     decompose(Inst, Factor, ShuffleTy, DecomposedVectors);
744 |
745 |     // Perform matrix-transposition in order to compute interleaved
746 |     // results by generating some sort of (optimized) target-specific
747 |     // instructions.
748 |
749 |     if (NumSubVecElems == 4)
750 |       transpose_4x4(DecomposedVectors, TransposedVectors);
751 |     else
752 |       deinterleave8bitStride3(DecomposedVectors, TransposedVectors,
753 |                               NumSubVecElems);
754 |
755 |     // Now replace the unoptimized-interleaved-vectors with the
756 |     // transposed-interleaved vectors.
757 |     for (unsigned i = 0, e = Shuffles.size(); i < e; ++i)
758 |       Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);
759 |
760 |     return true;
761 |   }
762 |
763 |   Type *ShuffleEltTy = ShuffleTy->getElementType();
764 |   unsigned NumSubVecElems = ShuffleTy->getNumElements() / Factor;
765 |
766 |   // Lower the interleaved stores:
767 |   //   1. Decompose the interleaved wide shuffle into individual shuffle
768 |   //      vectors.
769 |   decompose(Shuffles[0], Factor,
770 |             FixedVectorType::get(ShuffleEltTy, NumSubVecElems),
771 |             DecomposedVectors);
772 |
773 |   //   2. Transpose the interleaved-vectors into vectors of contiguous
774 |   //      elements.
775 |   switch (NumSubVecElems) {
776 |   case 4:
777 |     transpose_4x4(DecomposedVectors, TransposedVectors);
778 |     break;
779 |   case 8:
780 |     interleave8bitStride4VF8(DecomposedVectors, TransposedVectors);
781 |     break;
782 |   case 16:
783 |   case 32:
784 |   case 64:
785 |     if (Factor == 4)
786 |       interleave8bitStride4(DecomposedVectors, TransposedVectors,
787 |                             NumSubVecElems);
788 |     if (Factor == 3)
789 |       interleave8bitStride3(DecomposedVectors, TransposedVectors,
790 |                             NumSubVecElems);
791 |     break;
792 |   default:
793 |     return false;
794 |   }
795 |
796 |   //   3. Concatenate the contiguous-vectors back into a wide vector.
797 |   Value *WideVec = concatenateVectors(Builder, TransposedVectors);
798 |
799 |   //   4. Generate a store instruction for wide-vec.
800 |   StoreInst *SI = cast<StoreInst>(Inst);
801 |   Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(), SI->getAlign());
802 |
803 |   return true;
804 | }
805 |
806 | // Lowers interleaved load(s) into target-specific instructions/intrinsics.
807 | // The lowering sequence varies depending on the vector types, factor,
808 | // number of shuffles and ISA. Currently, load lowering is supported for
809 | // 4-element vectors of 64 bits with Factor = 4, and for 8-bit elements
810 | // with Factor = 3, on AVX (see isSupported()).
810 | bool X86TargetLowering::lowerInterleavedLoad(
811 |     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
812 |     ArrayRef<unsigned> Indices, unsigned Factor) const {
813 |   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
814 |          "Invalid interleave factor");
815 |   assert(!Shuffles.empty() && "Empty shufflevector input");
816 |   assert(Shuffles.size() == Indices.size() &&
817 |          "Unmatched number of shufflevectors and indices");
818 |
819 |   // Create an interleaved access group.
820 |   IRBuilder<> Builder(LI);
821 |   X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
822 |                                 Builder);
823 |
824 |   return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
825 | }
826 |
827 | bool X86TargetLowering::lowerInterleavedStore(StoreInst *SI,
828 |                                               ShuffleVectorInst *SVI,
829 |                                               unsigned Factor) const {
830 |   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
831 |          "Invalid interleave factor");
832 |
833 |   assert(cast<FixedVectorType>(SVI->getType())->getNumElements() % Factor ==
834 |              0 &&
835 |          "Invalid interleaved store");
836 |
837 |   // Holds the indices of SVI that correspond to the starting index of each
838 |   // interleaved shuffle.
839 |   SmallVector<unsigned, 4> Indices;
840 |   auto Mask = SVI->getShuffleMask();
841 |   for (unsigned i = 0; i < Factor; i++)
842 |     Indices.push_back(Mask[i]);
843 |
844 |   ArrayRef<ShuffleVectorInst *> Shuffles = ArrayRef(SVI);
845 |
846 |   // Create an interleaved access group.
847 |   IRBuilder<> Builder(SI);
848 |   X86InterleavedAccessGroup Grp(SI, Shuffles, Indices, Factor, Subtarget,
849 |                                 Builder);
850 |
851 |   return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
852 | }