AutoUpgrade.cpp (LLVM 7.0.0svn)
1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the auto-upgrade helper functions.
11 // This is where deprecated IR intrinsics and other IR features are updated to
12 // current specifications.
13 //
14 //===----------------------------------------------------------------------===//
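// For example, the helpers below rewrite calls to llvm.arm.neon.vclz.* as the
// generic llvm.ctlz.*, rename llvm.invariant.group.barrier to
// llvm.launder.invariant.group, and replace retired x86 vector intrinsics with
// plain IR (compares, shuffles, selects).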
15 
16 #include "llvm/IR/AutoUpgrade.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/IR/Constants.h"
19 #include "llvm/IR/DIBuilder.h"
20 #include "llvm/IR/DebugInfo.h"
21 #include "llvm/IR/DiagnosticInfo.h"
22 #include "llvm/IR/Function.h"
23 #include "llvm/IR/IRBuilder.h"
24 #include "llvm/IR/Instruction.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/IR/Module.h"
28 #include "llvm/IR/Verifier.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/Regex.h"
31 #include <cstring>
32 using namespace llvm;
33 
34 static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
35 
36 // Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
37 // changed their type from v4f32 to v2i64.
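// For example (illustrative IR), the stale declaration
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// is re-declared as
//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)
// and the old function is renamed with a ".old" suffix before being mapped over.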
38 static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
39  Function *&NewFn) {
40  // Check whether this is an old version of the function, which received
41  // v4f32 arguments.
42  Type *Arg0Type = F->getFunctionType()->getParamType(0);
43  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
44  return false;
45 
46  // Yes, it's old, replace it with new version.
47  rename(F);
48  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
49  return true;
50 }
51 
52 // Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
53 // arguments have changed their type from i32 to i8.
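// For example (illustrative IR), the old
//   declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32)
// becomes
//   declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8)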
54 static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
55  Function *&NewFn) {
56  // Check that the last argument is an i32.
57  Type *LastArgType = F->getFunctionType()->getParamType(
58  F->getFunctionType()->getNumParams() - 1);
59  if (!LastArgType->isIntegerTy(32))
60  return false;
61 
62  // Move this function aside and map down.
63  rename(F);
64  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
65  return true;
66 }
67 
68 // Upgrade the declaration of fp compare intrinsics that change return type
69 // from scalar to vXi1 mask.
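// For example (illustrative), an old llvm.x86.avx512.mask.cmp.ps.128 declaration
// returning a scalar i8 mask is upgraded to the current declaration that returns
// a <4 x i1> vector mask.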
70 static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
71  Function *&NewFn) {
72  // Check if the return type is a vector.
73  if (F->getReturnType()->isVectorTy())
74  return false;
75 
76  rename(F);
77  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
78  return true;
79 }
80 
81 static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
82  // All of the intrinsic matches below should be marked with which llvm
83  // version started autoupgrading them. At some point in the future we would
84  // like to use this information to remove upgrade code for some older
85  // intrinsics. It is currently undecided how we will determine that future
86  // point.
87  if (Name=="ssse3.pabs.b.128" || // Added in 6.0
88  Name=="ssse3.pabs.w.128" || // Added in 6.0
89  Name=="ssse3.pabs.d.128" || // Added in 6.0
90  Name.startswith("fma.vfmsub.") || // Added in 7.0
91  Name.startswith("fma.vfmsubadd.") || // Added in 7.0
92  Name.startswith("fma.vfnmadd.") || // Added in 7.0
93  Name.startswith("fma.vfnmsub.") || // Added in 7.0
94  Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
95  Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
96  Name.startswith("avx512.kunpck") || //added in 6.0
97  Name.startswith("avx2.pabs.") || // Added in 6.0
98  Name.startswith("avx512.mask.pabs.") || // Added in 6.0
99  Name.startswith("avx512.broadcastm") || // Added in 6.0
100  Name == "sse.sqrt.ss" || // Added in 7.0
101  Name == "sse2.sqrt.sd" || // Added in 7.0
102  Name == "avx512.mask.sqrt.ps.128" || // Added in 7.0
103  Name == "avx512.mask.sqrt.ps.256" || // Added in 7.0
104  Name == "avx512.mask.sqrt.pd.128" || // Added in 7.0
105  Name == "avx512.mask.sqrt.pd.256" || // Added in 7.0
106  Name.startswith("avx.sqrt.p") || // Added in 7.0
107  Name.startswith("sse2.sqrt.p") || // Added in 7.0
108  Name.startswith("sse.sqrt.p") || // Added in 7.0
109  Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
110  Name.startswith("sse2.pcmpeq.") || // Added in 3.1
111  Name.startswith("sse2.pcmpgt.") || // Added in 3.1
112  Name.startswith("avx2.pcmpeq.") || // Added in 3.1
113  Name.startswith("avx2.pcmpgt.") || // Added in 3.1
114  Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
115  Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
116  Name.startswith("avx.vperm2f128.") || // Added in 6.0
117  Name == "avx2.vperm2i128" || // Added in 6.0
118  Name == "sse.add.ss" || // Added in 4.0
119  Name == "sse2.add.sd" || // Added in 4.0
120  Name == "sse.sub.ss" || // Added in 4.0
121  Name == "sse2.sub.sd" || // Added in 4.0
122  Name == "sse.mul.ss" || // Added in 4.0
123  Name == "sse2.mul.sd" || // Added in 4.0
124  Name == "sse.div.ss" || // Added in 4.0
125  Name == "sse2.div.sd" || // Added in 4.0
126  Name == "sse41.pmaxsb" || // Added in 3.9
127  Name == "sse2.pmaxs.w" || // Added in 3.9
128  Name == "sse41.pmaxsd" || // Added in 3.9
129  Name == "sse2.pmaxu.b" || // Added in 3.9
130  Name == "sse41.pmaxuw" || // Added in 3.9
131  Name == "sse41.pmaxud" || // Added in 3.9
132  Name == "sse41.pminsb" || // Added in 3.9
133  Name == "sse2.pmins.w" || // Added in 3.9
134  Name == "sse41.pminsd" || // Added in 3.9
135  Name == "sse2.pminu.b" || // Added in 3.9
136  Name == "sse41.pminuw" || // Added in 3.9
137  Name == "sse41.pminud" || // Added in 3.9
138  Name == "avx512.kand.w" || // Added in 7.0
139  Name == "avx512.kandn.w" || // Added in 7.0
140  Name == "avx512.knot.w" || // Added in 7.0
141  Name == "avx512.kor.w" || // Added in 7.0
142  Name == "avx512.kxor.w" || // Added in 7.0
143  Name == "avx512.kxnor.w" || // Added in 7.0
144  Name == "avx512.kortestc.w" || // Added in 7.0
145  Name == "avx512.kortestz.w" || // Added in 7.0
146  Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
147  Name.startswith("avx2.pmax") || // Added in 3.9
148  Name.startswith("avx2.pmin") || // Added in 3.9
149  Name.startswith("avx512.mask.pmax") || // Added in 4.0
150  Name.startswith("avx512.mask.pmin") || // Added in 4.0
151  Name.startswith("avx2.vbroadcast") || // Added in 3.8
152  Name.startswith("avx2.pbroadcast") || // Added in 3.8
153  Name.startswith("avx.vpermil.") || // Added in 3.1
154  Name.startswith("sse2.pshuf") || // Added in 3.9
155  Name.startswith("avx512.pbroadcast") || // Added in 3.9
156  Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
157  Name.startswith("avx512.mask.movddup") || // Added in 3.9
158  Name.startswith("avx512.mask.movshdup") || // Added in 3.9
159  Name.startswith("avx512.mask.movsldup") || // Added in 3.9
160  Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
161  Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
162  Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
163  Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
164  Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
165  Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
166  Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
167  Name.startswith("avx512.mask.punpckl") || // Added in 3.9
168  Name.startswith("avx512.mask.punpckh") || // Added in 3.9
169  Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
170  Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
171  Name.startswith("avx512.mask.pand.") || // Added in 3.9
172  Name.startswith("avx512.mask.pandn.") || // Added in 3.9
173  Name.startswith("avx512.mask.por.") || // Added in 3.9
174  Name.startswith("avx512.mask.pxor.") || // Added in 3.9
175  Name.startswith("avx512.mask.and.") || // Added in 3.9
176  Name.startswith("avx512.mask.andn.") || // Added in 3.9
177  Name.startswith("avx512.mask.or.") || // Added in 3.9
178  Name.startswith("avx512.mask.xor.") || // Added in 3.9
179  Name.startswith("avx512.mask.padd.") || // Added in 4.0
180  Name.startswith("avx512.mask.psub.") || // Added in 4.0
181  Name.startswith("avx512.mask.pmull.") || // Added in 4.0
182  Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
183  Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
184  Name == "avx512.mask.cvtudq2ps.128" || // Added in 7.0
185  Name == "avx512.mask.cvtudq2ps.256" || // Added in 7.0
186  Name == "avx512.mask.cvtqq2pd.128" || // Added in 7.0
187  Name == "avx512.mask.cvtqq2pd.256" || // Added in 7.0
188  Name == "avx512.mask.cvtuqq2pd.128" || // Added in 7.0
189  Name == "avx512.mask.cvtuqq2pd.256" || // Added in 7.0
190  Name == "avx512.mask.cvtdq2ps.128" || // Added in 7.0
191  Name == "avx512.mask.cvtdq2ps.256" || // Added in 7.0
192  Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
193  Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
194  Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
195  Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
196  Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
197  Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
198  Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
199  Name == "avx512.cvtusi2sd" || // Added in 7.0
200  Name.startswith("avx512.mask.permvar.") || // Added in 7.0
201  Name.startswith("avx512.mask.permvar.") || // Added in 7.0
202  Name == "sse2.pmulu.dq" || // Added in 7.0
203  Name == "sse41.pmuldq" || // Added in 7.0
204  Name == "avx2.pmulu.dq" || // Added in 7.0
205  Name == "avx2.pmul.dq" || // Added in 7.0
206  Name == "avx512.pmulu.dq.512" || // Added in 7.0
207  Name == "avx512.pmul.dq.512" || // Added in 7.0
208  Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
209  Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
210  Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
211  Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
212  Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
213  Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
214  Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
215  Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
216  Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
217  Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
218  Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
219  Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
220  Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
221  Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
222  Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
223  Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
224  Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
225  Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
226  Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
227  Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
228  Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
229  Name.startswith("avx512.mask.psll.d") || // Added in 4.0
230  Name.startswith("avx512.mask.psll.q") || // Added in 4.0
231  Name.startswith("avx512.mask.psll.w") || // Added in 4.0
232  Name.startswith("avx512.mask.psra.d") || // Added in 4.0
233  Name.startswith("avx512.mask.psra.q") || // Added in 4.0
234  Name.startswith("avx512.mask.psra.w") || // Added in 4.0
235  Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
236  Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
237  Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
238  Name.startswith("avx512.mask.pslli") || // Added in 4.0
239  Name.startswith("avx512.mask.psrai") || // Added in 4.0
240  Name.startswith("avx512.mask.psrli") || // Added in 4.0
241  Name.startswith("avx512.mask.psllv") || // Added in 4.0
242  Name.startswith("avx512.mask.psrav") || // Added in 4.0
243  Name.startswith("avx512.mask.psrlv") || // Added in 4.0
244  Name.startswith("sse41.pmovsx") || // Added in 3.8
245  Name.startswith("sse41.pmovzx") || // Added in 3.9
246  Name.startswith("avx2.pmovsx") || // Added in 3.9
247  Name.startswith("avx2.pmovzx") || // Added in 3.9
248  Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
249  Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
250  Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
251  Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
252  Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
253  Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
254  Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
255  Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
256  Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
257  Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
258  Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
259  Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
260  Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
261  Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
262  Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
263  Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
264  Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
265  Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
266  Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
267  Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
268  Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
269  Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
270  Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
271  Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
272  Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
273  Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
274  Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
275  Name == "sse.cvtsi2ss" || // Added in 7.0
276  Name == "sse.cvtsi642ss" || // Added in 7.0
277  Name == "sse2.cvtsi2sd" || // Added in 7.0
278  Name == "sse2.cvtsi642sd" || // Added in 7.0
279  Name == "sse2.cvtss2sd" || // Added in 7.0
280  Name == "sse2.cvtdq2pd" || // Added in 3.9
281  Name == "sse2.cvtdq2ps" || // Added in 7.0
282  Name == "sse2.cvtps2pd" || // Added in 3.9
283  Name == "avx.cvtdq2.pd.256" || // Added in 3.9
284  Name == "avx.cvtdq2.ps.256" || // Added in 7.0
285  Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
286  Name.startswith("avx.vinsertf128.") || // Added in 3.7
287  Name == "avx2.vinserti128" || // Added in 3.7
288  Name.startswith("avx512.mask.insert") || // Added in 4.0
289  Name.startswith("avx.vextractf128.") || // Added in 3.7
290  Name == "avx2.vextracti128" || // Added in 3.7
291  Name.startswith("avx512.mask.vextract") || // Added in 4.0
292  Name.startswith("sse4a.movnt.") || // Added in 3.9
293  Name.startswith("avx.movnt.") || // Added in 3.2
294  Name.startswith("avx512.storent.") || // Added in 3.9
295  Name == "sse41.movntdqa" || // Added in 5.0
296  Name == "avx2.movntdqa" || // Added in 5.0
297  Name == "avx512.movntdqa" || // Added in 5.0
298  Name == "sse2.storel.dq" || // Added in 3.9
299  Name.startswith("sse.storeu.") || // Added in 3.9
300  Name.startswith("sse2.storeu.") || // Added in 3.9
301  Name.startswith("avx.storeu.") || // Added in 3.9
302  Name.startswith("avx512.mask.storeu.") || // Added in 3.9
303  Name.startswith("avx512.mask.store.p") || // Added in 3.9
304  Name.startswith("avx512.mask.store.b.") || // Added in 3.9
305  Name.startswith("avx512.mask.store.w.") || // Added in 3.9
306  Name.startswith("avx512.mask.store.d.") || // Added in 3.9
307  Name.startswith("avx512.mask.store.q.") || // Added in 3.9
308  Name == "avx512.mask.store.ss" || // Added in 7.0
309  Name.startswith("avx512.mask.loadu.") || // Added in 3.9
310  Name.startswith("avx512.mask.load.") || // Added in 3.9
311  Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
312  Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
313  Name == "sse42.crc32.64.8" || // Added in 3.4
314  Name.startswith("avx.vbroadcast.s") || // Added in 3.5
315  Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
316  Name.startswith("avx512.mask.palignr.") || // Added in 3.9
317  Name.startswith("avx512.mask.valign.") || // Added in 4.0
318  Name.startswith("sse2.psll.dq") || // Added in 3.7
319  Name.startswith("sse2.psrl.dq") || // Added in 3.7
320  Name.startswith("avx2.psll.dq") || // Added in 3.7
321  Name.startswith("avx2.psrl.dq") || // Added in 3.7
322  Name.startswith("avx512.psll.dq") || // Added in 3.9
323  Name.startswith("avx512.psrl.dq") || // Added in 3.9
324  Name == "sse41.pblendw" || // Added in 3.7
325  Name.startswith("sse41.blendp") || // Added in 3.7
326  Name.startswith("avx.blend.p") || // Added in 3.7
327  Name == "avx2.pblendw" || // Added in 3.7
328  Name.startswith("avx2.pblendd.") || // Added in 3.7
329  Name.startswith("avx.vbroadcastf128") || // Added in 4.0
330  Name == "avx2.vbroadcasti128" || // Added in 3.7
331  Name.startswith("avx512.mask.broadcastf") || // Added in 6.0
332  Name.startswith("avx512.mask.broadcasti") || // Added in 6.0
333  Name == "xop.vpcmov" || // Added in 3.8
334  Name == "xop.vpcmov.256" || // Added in 5.0
335  Name.startswith("avx512.mask.move.s") || // Added in 4.0
336  Name.startswith("avx512.cvtmask2") || // Added in 5.0
337  (Name.startswith("xop.vpcom") && // Added in 3.2
338  F->arg_size() == 2) ||
339  Name.startswith("avx512.ptestm") || //Added in 6.0
340  Name.startswith("avx512.ptestnm") || //Added in 6.0
341  Name.startswith("sse2.pavg") || // Added in 6.0
342  Name.startswith("avx2.pavg") || // Added in 6.0
343  Name.startswith("avx512.mask.pavg")) // Added in 6.0
344  return true;
345 
346  return false;
347 }
348 
349 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
350  Function *&NewFn) {
351  // Only handle intrinsics that start with "x86.".
352  if (!Name.startswith("x86."))
353  return false;
354  // Remove "x86." prefix.
355  Name = Name.substr(4);
356 
357  if (ShouldUpgradeX86Intrinsic(F, Name)) {
358  NewFn = nullptr;
359  return true;
360  }
361 
362  // SSE4.1 ptest functions may have an old signature.
363  if (Name.startswith("sse41.ptest")) { // Added in 3.2
364  if (Name.substr(11) == "c")
365  return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
366  if (Name.substr(11) == "z")
367  return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
368  if (Name.substr(11) == "nzc")
369  return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
370  }
371  // Several blend and other instructions with masks used the wrong number of
372  // bits.
373  if (Name == "sse41.insertps") // Added in 3.6
374  return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
375  NewFn);
376  if (Name == "sse41.dppd") // Added in 3.6
377  return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
378  NewFn);
379  if (Name == "sse41.dpps") // Added in 3.6
380  return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
381  NewFn);
382  if (Name == "sse41.mpsadbw") // Added in 3.6
383  return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
384  NewFn);
385  if (Name == "avx.dp.ps.256") // Added in 3.6
386  return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
387  NewFn);
388  if (Name == "avx2.mpsadbw") // Added in 3.6
389  return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
390  NewFn);
391  if (Name == "avx512.mask.cmp.pd.128") // Added in 7.0
392  return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_128,
393  NewFn);
394  if (Name == "avx512.mask.cmp.pd.256") // Added in 7.0
395  return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_256,
396  NewFn);
397  if (Name == "avx512.mask.cmp.pd.512") // Added in 7.0
398  return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_512,
399  NewFn);
400  if (Name == "avx512.mask.cmp.ps.128") // Added in 7.0
401  return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_128,
402  NewFn);
403  if (Name == "avx512.mask.cmp.ps.256") // Added in 7.0
404  return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_256,
405  NewFn);
406  if (Name == "avx512.mask.cmp.ps.512") // Added in 7.0
407  return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_512,
408  NewFn);
409 
410  // frcz.ss/sd may need to have an argument dropped. Added in 3.2
411  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
412  rename(F);
413  NewFn = Intrinsic::getDeclaration(F->getParent(),
414  Intrinsic::x86_xop_vfrcz_ss);
415  return true;
416  }
417  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
418  rename(F);
419  NewFn = Intrinsic::getDeclaration(F->getParent(),
420  Intrinsic::x86_xop_vfrcz_sd);
421  return true;
422  }
423  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
424  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
425  auto Idx = F->getFunctionType()->getParamType(2);
426  if (Idx->isFPOrFPVectorTy()) {
427  rename(F);
428  unsigned IdxSize = Idx->getPrimitiveSizeInBits();
429  unsigned EltSize = Idx->getScalarSizeInBits();
430  Intrinsic::ID Permil2ID;
431  if (EltSize == 64 && IdxSize == 128)
432  Permil2ID = Intrinsic::x86_xop_vpermil2pd;
433  else if (EltSize == 32 && IdxSize == 128)
434  Permil2ID = Intrinsic::x86_xop_vpermil2ps;
435  else if (EltSize == 64 && IdxSize == 256)
436  Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
437  else
438  Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
439  NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
440  return true;
441  }
442  }
443 
444  return false;
445 }
446 
447 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
448  assert(F && "Illegal to upgrade a non-existent Function.");
449 
450  // Quickly eliminate it, if it's not a candidate.
451  StringRef Name = F->getName();
452  if (Name.size() <= 8 || !Name.startswith("llvm."))
453  return false;
454  Name = Name.substr(5); // Strip off "llvm."
455 
456  switch (Name[0]) {
457  default: break;
458  case 'a': {
459  if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
460  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
461  F->arg_begin()->getType());
462  return true;
463  }
464  if (Name.startswith("arm.neon.vclz")) {
465  Type* args[2] = {
466  F->arg_begin()->getType(),
467  Type::getInt1Ty(F->getContext())
468  };
469  // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
470  // the end of the name. Change name from llvm.arm.neon.vclz.* to
471  // llvm.ctlz.*
472  FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
473  NewFn = Function::Create(fType, F->getLinkage(),
474  "llvm.ctlz." + Name.substr(14), F->getParent());
475  return true;
476  }
477  if (Name.startswith("arm.neon.vcnt")) {
478  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
479  F->arg_begin()->getType());
480  return true;
481  }
482  Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
483  if (vldRegex.match(Name)) {
484  auto fArgs = F->getFunctionType()->params();
485  SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
486  // Can't use Intrinsic::getDeclaration here as the return types might
487  // then only be structurally equal.
488  FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
489  NewFn = Function::Create(fType, F->getLinkage(),
490  "llvm." + Name + ".p0i8", F->getParent());
491  return true;
492  }
493  Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
494  if (vstRegex.match(Name)) {
495  static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
496  Intrinsic::arm_neon_vst2,
497  Intrinsic::arm_neon_vst3,
498  Intrinsic::arm_neon_vst4};
499 
500  static const Intrinsic::ID StoreLaneInts[] = {
501  Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
502  Intrinsic::arm_neon_vst4lane
503  };
504 
505  auto fArgs = F->getFunctionType()->params();
506  Type *Tys[] = {fArgs[0], fArgs[1]};
507  if (Name.find("lane") == StringRef::npos)
508  NewFn = Intrinsic::getDeclaration(F->getParent(),
509  StoreInts[fArgs.size() - 3], Tys);
510  else
511  NewFn = Intrinsic::getDeclaration(F->getParent(),
512  StoreLaneInts[fArgs.size() - 5], Tys);
513  return true;
514  }
515  if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
516  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
517  return true;
518  }
519  break;
520  }
521 
522  case 'c': {
523  if (Name.startswith("ctlz.") && F->arg_size() == 1) {
524  rename(F);
525  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
526  F->arg_begin()->getType());
527  return true;
528  }
529  if (Name.startswith("cttz.") && F->arg_size() == 1) {
530  rename(F);
531  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
532  F->arg_begin()->getType());
533  return true;
534  }
535  break;
536  }
537  case 'd': {
538  if (Name == "dbg.value" && F->arg_size() == 4) {
539  rename(F);
540  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
541  return true;
542  }
543  break;
544  }
545  case 'i':
546  case 'l': {
547  bool IsLifetimeStart = Name.startswith("lifetime.start");
548  if (IsLifetimeStart || Name.startswith("invariant.start")) {
549  Intrinsic::ID ID = IsLifetimeStart ?
550  Intrinsic::lifetime_start : Intrinsic::invariant_start;
551  auto Args = F->getFunctionType()->params();
552  Type* ObjectPtr[1] = {Args[1]};
553  if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
554  rename(F);
555  NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
556  return true;
557  }
558  }
559 
560  bool IsLifetimeEnd = Name.startswith("lifetime.end");
561  if (IsLifetimeEnd || Name.startswith("invariant.end")) {
562  Intrinsic::ID ID = IsLifetimeEnd ?
563  Intrinsic::lifetime_end : Intrinsic::invariant_end;
564 
565  auto Args = F->getFunctionType()->params();
566  Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
567  if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
568  rename(F);
569  NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
570  return true;
571  }
572  }
573  if (Name.startswith("invariant.group.barrier")) {
574  // Rename invariant.group.barrier to launder.invariant.group
575  auto Args = F->getFunctionType()->params();
576  Type* ObjectPtr[1] = {Args[0]};
577  rename(F);
578  NewFn = Intrinsic::getDeclaration(F->getParent(),
579  Intrinsic::launder_invariant_group, ObjectPtr);
580  return true;
581 
582  }
583 
584  break;
585  }
586  case 'm': {
587  if (Name.startswith("masked.load.")) {
588  Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
589  if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
590  rename(F);
591  NewFn = Intrinsic::getDeclaration(F->getParent(),
592  Intrinsic::masked_load,
593  Tys);
594  return true;
595  }
596  }
597  if (Name.startswith("masked.store.")) {
598  auto Args = F->getFunctionType()->params();
599  Type *Tys[] = { Args[0], Args[1] };
600  if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
601  rename(F);
602  NewFn = Intrinsic::getDeclaration(F->getParent(),
603  Intrinsic::masked_store,
604  Tys);
605  return true;
606  }
607  }
608  // Renaming gather/scatter intrinsics with no address space overloading
609  // to the new overload which includes an address space
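  // e.g. (illustrative): llvm.masked.gather.v2f64 is re-declared as
  // llvm.masked.gather.v2f64.v2p0f64, whose mangled name also encodes the
  // pointer vector type and therefore its address space.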
610  if (Name.startswith("masked.gather.")) {
611  Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
612  if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
613  rename(F);
614  NewFn = Intrinsic::getDeclaration(F->getParent(),
615  Intrinsic::masked_gather, Tys);
616  return true;
617  }
618  }
619  if (Name.startswith("masked.scatter.")) {
620  auto Args = F->getFunctionType()->params();
621  Type *Tys[] = {Args[0], Args[1]};
622  if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
623  rename(F);
624  NewFn = Intrinsic::getDeclaration(F->getParent(),
625  Intrinsic::masked_scatter, Tys);
626  return true;
627  }
628  }
629  // Updating the memory intrinsics (memcpy/memmove/memset) that have an
630  // alignment parameter to embedding the alignment as an attribute of
631  // the pointer args.
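  // For example (illustrative IR), the old five-argument form
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 4, i1 false)
  // becomes the four-argument form with the alignment carried by attributes:
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 %n, i1 false)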
632  if (Name.startswith("memcpy.") && F->arg_size() == 5) {
633  rename(F);
634  // Get the types of dest, src, and len
635  ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
636  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
637  ParamTypes);
638  return true;
639  }
640  if (Name.startswith("memmove.") && F->arg_size() == 5) {
641  rename(F);
642  // Get the types of dest, src, and len
643  ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
644  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
645  ParamTypes);
646  return true;
647  }
648  if (Name.startswith("memset.") && F->arg_size() == 5) {
649  rename(F);
650  // Get the types of dest, and len
651  const auto *FT = F->getFunctionType();
652  Type *ParamTypes[2] = {
653  FT->getParamType(0), // Dest
654  FT->getParamType(2) // len
655  };
656  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
657  ParamTypes);
658  return true;
659  }
660  break;
661  }
662  case 'n': {
663  if (Name.startswith("nvvm.")) {
664  Name = Name.substr(5);
665 
666  // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
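  // e.g. (illustrative): llvm.nvvm.brev32 maps to llvm.bitreverse.i32 and
  // llvm.nvvm.popc.i maps to llvm.ctpop.i32.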
668  .Cases("brev32", "brev64", Intrinsic::bitreverse)
669  .Case("clz.i", Intrinsic::ctlz)
670  .Case("popc.i", Intrinsic::ctpop)
672  if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
673  NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
674  {F->getReturnType()});
675  return true;
676  }
677 
678  // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
679  // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
680  //
681  // TODO: We could add lohi.i2d.
682  bool Expand = StringSwitch<bool>(Name)
683  .Cases("abs.i", "abs.ll", true)
684  .Cases("clz.ll", "popc.ll", "h2f", true)
685  .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
686  .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
687  .Default(false);
688  if (Expand) {
689  NewFn = nullptr;
690  return true;
691  }
692  }
693  break;
694  }
695  case 'o':
696  // We only need to change the name to match the mangling including the
697  // address space.
698  if (Name.startswith("objectsize.")) {
699  Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
700  if (F->arg_size() == 2 ||
701  F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
702  rename(F);
703  NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
704  Tys);
705  return true;
706  }
707  }
708  break;
709 
710  case 's':
711  if (Name == "stackprotectorcheck") {
712  NewFn = nullptr;
713  return true;
714  }
715  break;
716 
717  case 'x':
718  if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
719  return true;
720  }
721  // Remangle our intrinsic since we upgrade the mangling
722  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
723  if (Result != None) {
724  NewFn = Result.getValue();
725  return true;
726  }
727 
728  // This may not belong here. This function is effectively being overloaded
729  // to both detect an intrinsic which needs upgrading, and to provide the
730  // upgraded form of the intrinsic. We should perhaps have two separate
731  // functions for this.
732  return false;
733 }
734 
735 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
736  NewFn = nullptr;
737  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
738  assert(F != NewFn && "Intrinsic function upgraded to the same function");
739 
740  // Upgrade intrinsic attributes. This does not change the function.
741  if (NewFn)
742  F = NewFn;
743  if (Intrinsic::ID id = F->getIntrinsicID())
744  F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
745  return Upgraded;
746 }
747 
748 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
749  // Nothing to do yet.
750  return false;
751 }
752 
753 // Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
754 // to byte shuffles.
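// For example (illustrative): a 128-bit shift left by 4 bytes bitcasts the operand
// to <16 x i8> and emits a shufflevector of (zeroinitializer, %op) with indices
// 12..27, i.e. four zero bytes followed by bytes 0..11 of the operand.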
755 static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
756  Value *Op, unsigned Shift) {
757  Type *ResultTy = Op->getType();
758  unsigned NumElts = ResultTy->getVectorNumElements() * 8;
759 
760  // Bitcast from a 64-bit element type to a byte element type.
761  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
762  Op = Builder.CreateBitCast(Op, VecTy, "cast");
763 
764  // We'll be shuffling in zeroes.
765  Value *Res = Constant::getNullValue(VecTy);
766 
767  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
768  // we'll just return the zero vector.
769  if (Shift < 16) {
770  uint32_t Idxs[64];
771  // 256/512-bit version is split into 2/4 16-byte lanes.
772  for (unsigned l = 0; l != NumElts; l += 16)
773  for (unsigned i = 0; i != 16; ++i) {
774  unsigned Idx = NumElts + i - Shift;
775  if (Idx < NumElts)
776  Idx -= NumElts - 16; // end of lane, switch operand.
777  Idxs[l + i] = Idx + l;
778  }
779 
780  Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
781  }
782 
783  // Bitcast back to a 64-bit element type.
784  return Builder.CreateBitCast(Res, ResultTy, "cast");
785 }
786 
787 // Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
788 // to byte shuffles.
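// For example (illustrative): a 128-bit shift right by 4 bytes emits a
// shufflevector of (%op, zeroinitializer) with indices 4..19, i.e. bytes 4..15 of
// the operand followed by four zero bytes.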
789 static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
790  unsigned Shift) {
791  Type *ResultTy = Op->getType();
792  unsigned NumElts = ResultTy->getVectorNumElements() * 8;
793 
794  // Bitcast from a 64-bit element type to a byte element type.
795  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
796  Op = Builder.CreateBitCast(Op, VecTy, "cast");
797 
798  // We'll be shuffling in zeroes.
799  Value *Res = Constant::getNullValue(VecTy);
800 
801  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
802  // we'll just return the zero vector.
803  if (Shift < 16) {
804  uint32_t Idxs[64];
805  // 256/512-bit version is split into 2/4 16-byte lanes.
806  for (unsigned l = 0; l != NumElts; l += 16)
807  for (unsigned i = 0; i != 16; ++i) {
808  unsigned Idx = i + Shift;
809  if (Idx >= 16)
810  Idx += NumElts - 16; // end of lane, switch operand.
811  Idxs[l + i] = Idx + l;
812  }
813 
814  Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
815  }
816 
817  // Bitcast back to a 64-bit element type.
818  return Builder.CreateBitCast(Res, ResultTy, "cast");
819 }
820 
821 static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
822  unsigned NumElts) {
823  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
824  cast<IntegerType>(Mask->getType())->getBitWidth());
825  Mask = Builder.CreateBitCast(Mask, MaskTy);
826 
827  // If we have less than 8 elements, then the starting mask was an i8 and
828  // we need to extract down to the right number of elements.
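  // e.g. (illustrative): an i8 mask paired with a <4 x i64> operation is bitcast
  // to <8 x i1> above and narrowed here to <4 x i1> by a shufflevector keeping
  // elements 0..3.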
829  if (NumElts < 8) {
830  uint32_t Indices[4];
831  for (unsigned i = 0; i != NumElts; ++i)
832  Indices[i] = i;
833  Mask = Builder.CreateShuffleVector(Mask, Mask,
834  makeArrayRef(Indices, NumElts),
835  "extract");
836  }
837 
838  return Mask;
839 }
840 
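// Select between Op0 and Op1 per lane using an integer mask: the mask is first
// converted to a vector of i1; a constant all-ones mask short-circuits to Op0.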
841 static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
842  Value *Op0, Value *Op1) {
843  // If the mask is all ones just emit the align operation.
844  if (const auto *C = dyn_cast<Constant>(Mask))
845  if (C->isAllOnesValue())
846  return Op0;
847 
848  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
849  return Builder.CreateSelect(Mask, Op0, Op1);
850 }
851 
852 // Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
853 // PALIGNR handles large immediates by shifting while VALIGN masks the immediate
854 // so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
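// e.g. (illustrative): a 128-bit palignr with an immediate of 20 is handled as a
// shift of 4 with Op1 = Op0 and Op0 = zeroes, so the extra bytes are shifted in
// as zero.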
855 static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
856  Value *Op1, Value *Shift,
857  Value *Passthru, Value *Mask,
858  bool IsVALIGN) {
859  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
860 
861  unsigned NumElts = Op0->getType()->getVectorNumElements();
862  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
863  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
864  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
865 
866  // Mask the immediate for VALIGN.
867  if (IsVALIGN)
868  ShiftVal &= (NumElts - 1);
869 
870  // If palignr is shifting the pair of vectors more than the size of two
871  // lanes, emit zero.
872  if (ShiftVal >= 32)
873  return llvm::Constant::getNullValue(Op0->getType());
874 
875  // If palignr is shifting the pair of input vectors more than one lane,
876  // but less than two lanes, convert to shifting in zeroes.
877  if (ShiftVal > 16) {
878  ShiftVal -= 16;
879  Op1 = Op0;
880  Op0 = llvm::Constant::getNullValue(Op0->getType());
881  }
882 
883  uint32_t Indices[64];
884  // 256-bit palignr operates on 128-bit lanes so we need to handle that
885  for (unsigned l = 0; l < NumElts; l += 16) {
886  for (unsigned i = 0; i != 16; ++i) {
887  unsigned Idx = ShiftVal + i;
888  if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
889  Idx += NumElts - 16; // End of lane, switch operand.
890  Indices[l + i] = Idx + l;
891  }
892  }
893 
894  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
895  makeArrayRef(Indices, NumElts),
896  "palignr");
897 
898  return EmitX86Select(Builder, Mask, Align, Passthru);
899 }
900 
901 static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
902  Value *Ptr, Value *Data, Value *Mask,
903  bool Aligned) {
904  // Cast the pointer to the right type.
905  Ptr = Builder.CreateBitCast(Ptr,
906  llvm::PointerType::getUnqual(Data->getType()));
907  unsigned Align =
908  Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;
909 
910  // If the mask is all ones just emit a regular store.
911  if (const auto *C = dyn_cast<Constant>(Mask))
912  if (C->isAllOnesValue())
913  return Builder.CreateAlignedStore(Data, Ptr, Align);
914 
915  // Convert the mask from an integer type to a vector of i1.
916  unsigned NumElts = Data->getType()->getVectorNumElements();
917  Mask = getX86MaskVec(Builder, Mask, NumElts);
918  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
919 }
920 
921 static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
922  Value *Ptr, Value *Passthru, Value *Mask,
923  bool Aligned) {
924  // Cast the pointer to the right type.
925  Ptr = Builder.CreateBitCast(Ptr,
926  llvm::PointerType::getUnqual(Passthru->getType()));
927  unsigned Align =
928  Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
929 
930  // If the mask is all ones just emit a regular load.
931  if (const auto *C = dyn_cast<Constant>(Mask))
932  if (C->isAllOnesValue())
933  return Builder.CreateAlignedLoad(Ptr, Align);
934 
935  // Convert the mask from an integer type to a vector of i1.
936  unsigned NumElts = Passthru->getType()->getVectorNumElements();
937  Mask = getX86MaskVec(Builder, Mask, NumElts);
938  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
939 }
940 
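// Upgrade the PABS family (ssse3/avx2/avx512.mask.pabs.*): |x| is emitted as
// icmp sgt + neg + select, with the avx512 mask applied via a further select.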
941 static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
942  Value *Op0 = CI.getArgOperand(0);
943  llvm::Type *Ty = Op0->getType();
944  Value *Zero = llvm::Constant::getNullValue(Ty);
945  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
946  Value *Neg = Builder.CreateNeg(Op0);
947  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);
948 
949  if (CI.getNumArgOperands() == 3)
950  Res = EmitX86Select(Builder,CI.getArgOperand(2), Res, CI.getArgOperand(1));
951 
952  return Res;
953 }
954 
955 static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
956  ICmpInst::Predicate Pred) {
957  Value *Op0 = CI.getArgOperand(0);
958  Value *Op1 = CI.getArgOperand(1);
959  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
960  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);
961 
962  if (CI.getNumArgOperands() == 4)
963  Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
964 
965  return Res;
966 }
967 
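// Upgrade PMULDQ/PMULUDQ: the even 32-bit elements are widened inside 64-bit
// lanes. e.g. (illustrative) for sse2.pmulu.dq, the <4 x i32> operands are bitcast
// to <2 x i64>, the upper halves cleared (or sign-extended for the signed form),
// and a 64-bit multiply is emitted.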
968 static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
969  Type *Ty = CI.getType();
970 
971  // Arguments have a vXi32 type so cast to vXi64.
972  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
973  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
974 
975  if (IsSigned) {
976  // Shift left then arithmetic shift right.
977  Constant *ShiftAmt = ConstantInt::get(Ty, 32);
978  LHS = Builder.CreateShl(LHS, ShiftAmt);
979  LHS = Builder.CreateAShr(LHS, ShiftAmt);
980  RHS = Builder.CreateShl(RHS, ShiftAmt);
981  RHS = Builder.CreateAShr(RHS, ShiftAmt);
982  } else {
983  // Clear the upper bits.
984  Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
985  LHS = Builder.CreateAnd(LHS, Mask);
986  RHS = Builder.CreateAnd(RHS, Mask);
987  }
988 
989  Value *Res = Builder.CreateMul(LHS, RHS);
990 
991  if (CI.getNumArgOperands() == 4)
992  Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
993 
994  return Res;
995 }
996 
997 // Apply a mask to a vector of i1's and make sure the result is at least 8 bits wide.
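// e.g. (illustrative): a <4 x i1> compare result is AND'ed with the <4 x i1> form
// of the mask, padded to <8 x i1> by shuffling in zero elements, and bitcast to i8.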
998 static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask,
999  unsigned NumElts) {
1000  if (Mask) {
1001  const auto *C = dyn_cast<Constant>(Mask);
1002  if (!C || !C->isAllOnesValue())
1003  Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1004  }
1005 
1006  if (NumElts < 8) {
1007  uint32_t Indices[8];
1008  for (unsigned i = 0; i != NumElts; ++i)
1009  Indices[i] = i;
1010  for (unsigned i = NumElts; i != 8; ++i)
1011  Indices[i] = NumElts + i % NumElts;
1012  Vec = Builder.CreateShuffleVector(Vec,
1013  Constant::getNullValue(Vec->getType()),
1014  Indices);
1015  }
1016  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1017 }
1018 
1019 static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
1020  unsigned CC, bool Signed) {
1021  Value *Op0 = CI.getArgOperand(0);
1022  unsigned NumElts = Op0->getType()->getVectorNumElements();
1023 
1024  Value *Cmp;
1025  if (CC == 3) {
1026  Cmp = Constant::getNullValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
1027  } else if (CC == 7) {
1028  Cmp = Constant::getAllOnesValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
1029  } else {
1030  ICmpInst::Predicate Pred;
1031  switch (CC) {
1032  default: llvm_unreachable("Unknown condition code");
1033  case 0: Pred = ICmpInst::ICMP_EQ; break;
1034  case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1035  case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1036  case 4: Pred = ICmpInst::ICMP_NE; break;
1037  case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1038  case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1039  }
1040  Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1041  }
1042 
1043  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);
1044 
1045  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask, NumElts);
1046 }
1047 
1048 // Replace a masked intrinsic with an older unmasked intrinsic.
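// e.g. (illustrative): a masked avx512.mask.psll.d call becomes a call to the
// corresponding unmasked shift intrinsic on operands 0 and 1, followed by
// EmitX86Select with the mask (operand 3) and passthru (operand 2).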
1049 static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
1050  Intrinsic::ID IID) {
1051  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1052  Value *Rep = Builder.CreateCall(Intrin,
1053  { CI.getArgOperand(0), CI.getArgOperand(1) });
1054  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1055 }
1056 
1057 static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
1058  Value* A = CI.getArgOperand(0);
1059  Value* B = CI.getArgOperand(1);
1060  Value* Src = CI.getArgOperand(2);
1061  Value* Mask = CI.getArgOperand(3);
1062 
1063  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1064  Value* Cmp = Builder.CreateIsNotNull(AndNode);
1065  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1066  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1067  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1068  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1069 }
1070 
1071 
1072 static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
1073  Value* Op = CI.getArgOperand(0);
1074  Type* ReturnOp = CI.getType();
1075  unsigned NumElts = CI.getType()->getVectorNumElements();
1076  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1077  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1078 }
1079 
1080 // Replace intrinsic with unmasked version and a select.
1081 static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1082  CallInst &CI, Value *&Rep) {
1083  Name = Name.substr(12); // Remove avx512.mask.
1084 
1085  unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1086  unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1087  Intrinsic::ID IID;
1088  if (Name.startswith("max.p")) {
1089  if (VecWidth == 128 && EltWidth == 32)
1090  IID = Intrinsic::x86_sse_max_ps;
1091  else if (VecWidth == 128 && EltWidth == 64)
1092  IID = Intrinsic::x86_sse2_max_pd;
1093  else if (VecWidth == 256 && EltWidth == 32)
1094  IID = Intrinsic::x86_avx_max_ps_256;
1095  else if (VecWidth == 256 && EltWidth == 64)
1096  IID = Intrinsic::x86_avx_max_pd_256;
1097  else
1098  llvm_unreachable("Unexpected intrinsic");
1099  } else if (Name.startswith("min.p")) {
1100  if (VecWidth == 128 && EltWidth == 32)
1101  IID = Intrinsic::x86_sse_min_ps;
1102  else if (VecWidth == 128 && EltWidth == 64)
1103  IID = Intrinsic::x86_sse2_min_pd;
1104  else if (VecWidth == 256 && EltWidth == 32)
1105  IID = Intrinsic::x86_avx_min_ps_256;
1106  else if (VecWidth == 256 && EltWidth == 64)
1107  IID = Intrinsic::x86_avx_min_pd_256;
1108  else
1109  llvm_unreachable("Unexpected intrinsic");
1110  } else if (Name.startswith("pshuf.b.")) {
1111  if (VecWidth == 128)
1112  IID = Intrinsic::x86_ssse3_pshuf_b_128;
1113  else if (VecWidth == 256)
1114  IID = Intrinsic::x86_avx2_pshuf_b;
1115  else if (VecWidth == 512)
1116  IID = Intrinsic::x86_avx512_pshuf_b_512;
1117  else
1118  llvm_unreachable("Unexpected intrinsic");
1119  } else if (Name.startswith("pmul.hr.sw.")) {
1120  if (VecWidth == 128)
1121  IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
1122  else if (VecWidth == 256)
1123  IID = Intrinsic::x86_avx2_pmul_hr_sw;
1124  else if (VecWidth == 512)
1125  IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
1126  else
1127  llvm_unreachable("Unexpected intrinsic");
1128  } else if (Name.startswith("pmulh.w.")) {
1129  if (VecWidth == 128)
1130  IID = Intrinsic::x86_sse2_pmulh_w;
1131  else if (VecWidth == 256)
1132  IID = Intrinsic::x86_avx2_pmulh_w;
1133  else if (VecWidth == 512)
1134  IID = Intrinsic::x86_avx512_pmulh_w_512;
1135  else
1136  llvm_unreachable("Unexpected intrinsic");
1137  } else if (Name.startswith("pmulhu.w.")) {
1138  if (VecWidth == 128)
1139  IID = Intrinsic::x86_sse2_pmulhu_w;
1140  else if (VecWidth == 256)
1141  IID = Intrinsic::x86_avx2_pmulhu_w;
1142  else if (VecWidth == 512)
1143  IID = Intrinsic::x86_avx512_pmulhu_w_512;
1144  else
1145  llvm_unreachable("Unexpected intrinsic");
1146  } else if (Name.startswith("pmaddw.d.")) {
1147  if (VecWidth == 128)
1148  IID = Intrinsic::x86_sse2_pmadd_wd;
1149  else if (VecWidth == 256)
1150  IID = Intrinsic::x86_avx2_pmadd_wd;
1151  else if (VecWidth == 512)
1152  IID = Intrinsic::x86_avx512_pmaddw_d_512;
1153  else
1154  llvm_unreachable("Unexpected intrinsic");
1155  } else if (Name.startswith("pmaddubs.w.")) {
1156  if (VecWidth == 128)
1157  IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
1158  else if (VecWidth == 256)
1159  IID = Intrinsic::x86_avx2_pmadd_ub_sw;
1160  else if (VecWidth == 512)
1161  IID = Intrinsic::x86_avx512_pmaddubs_w_512;
1162  else
1163  llvm_unreachable("Unexpected intrinsic");
1164  } else if (Name.startswith("packsswb.")) {
1165  if (VecWidth == 128)
1166  IID = Intrinsic::x86_sse2_packsswb_128;
1167  else if (VecWidth == 256)
1168  IID = Intrinsic::x86_avx2_packsswb;
1169  else if (VecWidth == 512)
1170  IID = Intrinsic::x86_avx512_packsswb_512;
1171  else
1172  llvm_unreachable("Unexpected intrinsic");
1173  } else if (Name.startswith("packssdw.")) {
1174  if (VecWidth == 128)
1175  IID = Intrinsic::x86_sse2_packssdw_128;
1176  else if (VecWidth == 256)
1177  IID = Intrinsic::x86_avx2_packssdw;
1178  else if (VecWidth == 512)
1179  IID = Intrinsic::x86_avx512_packssdw_512;
1180  else
1181  llvm_unreachable("Unexpected intrinsic");
1182  } else if (Name.startswith("packuswb.")) {
1183  if (VecWidth == 128)
1184  IID = Intrinsic::x86_sse2_packuswb_128;
1185  else if (VecWidth == 256)
1186  IID = Intrinsic::x86_avx2_packuswb;
1187  else if (VecWidth == 512)
1188  IID = Intrinsic::x86_avx512_packuswb_512;
1189  else
1190  llvm_unreachable("Unexpected intrinsic");
1191  } else if (Name.startswith("packusdw.")) {
1192  if (VecWidth == 128)
1193  IID = Intrinsic::x86_sse41_packusdw;
1194  else if (VecWidth == 256)
1195  IID = Intrinsic::x86_avx2_packusdw;
1196  else if (VecWidth == 512)
1197  IID = Intrinsic::x86_avx512_packusdw_512;
1198  else
1199  llvm_unreachable("Unexpected intrinsic");
1200  } else if (Name.startswith("vpermilvar.")) {
1201  if (VecWidth == 128 && EltWidth == 32)
1202  IID = Intrinsic::x86_avx_vpermilvar_ps;
1203  else if (VecWidth == 128 && EltWidth == 64)
1204  IID = Intrinsic::x86_avx_vpermilvar_pd;
1205  else if (VecWidth == 256 && EltWidth == 32)
1206  IID = Intrinsic::x86_avx_vpermilvar_ps_256;
1207  else if (VecWidth == 256 && EltWidth == 64)
1208  IID = Intrinsic::x86_avx_vpermilvar_pd_256;
1209  else if (VecWidth == 512 && EltWidth == 32)
1210  IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
1211  else if (VecWidth == 512 && EltWidth == 64)
1212  IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
1213  else
1214  llvm_unreachable("Unexpected intrinsic");
1215  } else if (Name == "cvtpd2dq.256") {
1216  IID = Intrinsic::x86_avx_cvt_pd2dq_256;
1217  } else if (Name == "cvtpd2ps.256") {
1218  IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
1219  } else if (Name == "cvttpd2dq.256") {
1220  IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
1221  } else if (Name == "cvttps2dq.128") {
1222  IID = Intrinsic::x86_sse2_cvttps2dq;
1223  } else if (Name == "cvttps2dq.256") {
1224  IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
1225  } else if (Name.startswith("permvar.")) {
1226  bool IsFloat = CI.getType()->isFPOrFPVectorTy();
1227  if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1228  IID = Intrinsic::x86_avx2_permps;
1229  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1230  IID = Intrinsic::x86_avx2_permd;
1231  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1232  IID = Intrinsic::x86_avx512_permvar_df_256;
1233  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1234  IID = Intrinsic::x86_avx512_permvar_di_256;
1235  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1236  IID = Intrinsic::x86_avx512_permvar_sf_512;
1237  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1238  IID = Intrinsic::x86_avx512_permvar_si_512;
1239  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1240  IID = Intrinsic::x86_avx512_permvar_df_512;
1241  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1242  IID = Intrinsic::x86_avx512_permvar_di_512;
1243  else if (VecWidth == 128 && EltWidth == 16)
1244  IID = Intrinsic::x86_avx512_permvar_hi_128;
1245  else if (VecWidth == 256 && EltWidth == 16)
1246  IID = Intrinsic::x86_avx512_permvar_hi_256;
1247  else if (VecWidth == 512 && EltWidth == 16)
1248  IID = Intrinsic::x86_avx512_permvar_hi_512;
1249  else if (VecWidth == 128 && EltWidth == 8)
1250  IID = Intrinsic::x86_avx512_permvar_qi_128;
1251  else if (VecWidth == 256 && EltWidth == 8)
1252  IID = Intrinsic::x86_avx512_permvar_qi_256;
1253  else if (VecWidth == 512 && EltWidth == 8)
1254  IID = Intrinsic::x86_avx512_permvar_qi_512;
1255  else
1256  llvm_unreachable("Unexpected intrinsic");
1257  } else if (Name.startswith("dbpsadbw.")) {
1258  if (VecWidth == 128)
1259  IID = Intrinsic::x86_avx512_dbpsadbw_128;
1260  else if (VecWidth == 256)
1261  IID = Intrinsic::x86_avx512_dbpsadbw_256;
1262  else if (VecWidth == 512)
1263  IID = Intrinsic::x86_avx512_dbpsadbw_512;
1264  else
1265  llvm_unreachable("Unexpected intrinsic");
1266  } else if (Name.startswith("vpshld.")) {
1267  if (VecWidth == 128 && Name[7] == 'q')
1268  IID = Intrinsic::x86_avx512_vpshld_q_128;
1269  else if (VecWidth == 128 && Name[7] == 'd')
1270  IID = Intrinsic::x86_avx512_vpshld_d_128;
1271  else if (VecWidth == 128 && Name[7] == 'w')
1272  IID = Intrinsic::x86_avx512_vpshld_w_128;
1273  else if (VecWidth == 256 && Name[7] == 'q')
1274  IID = Intrinsic::x86_avx512_vpshld_q_256;
1275  else if (VecWidth == 256 && Name[7] == 'd')
1276  IID = Intrinsic::x86_avx512_vpshld_d_256;
1277  else if (VecWidth == 256 && Name[7] == 'w')
1278  IID = Intrinsic::x86_avx512_vpshld_w_256;
1279  else if (VecWidth == 512 && Name[7] == 'q')
1280  IID = Intrinsic::x86_avx512_vpshld_q_512;
1281  else if (VecWidth == 512 && Name[7] == 'd')
1282  IID = Intrinsic::x86_avx512_vpshld_d_512;
1283  else if (VecWidth == 512 && Name[7] == 'w')
1284  IID = Intrinsic::x86_avx512_vpshld_w_512;
1285  else
1286  llvm_unreachable("Unexpected intrinsic");
1287  } else if (Name.startswith("vpshrd.")) {
1288  if (VecWidth == 128 && Name[7] == 'q')
1289  IID = Intrinsic::x86_avx512_vpshrd_q_128;
1290  else if (VecWidth == 128 && Name[7] == 'd')
1291  IID = Intrinsic::x86_avx512_vpshrd_d_128;
1292  else if (VecWidth == 128 && Name[7] == 'w')
1293  IID = Intrinsic::x86_avx512_vpshrd_w_128;
1294  else if (VecWidth == 256 && Name[7] == 'q')
1295  IID = Intrinsic::x86_avx512_vpshrd_q_256;
1296  else if (VecWidth == 256 && Name[7] == 'd')
1297  IID = Intrinsic::x86_avx512_vpshrd_d_256;
1298  else if (VecWidth == 256 && Name[7] == 'w')
1299  IID = Intrinsic::x86_avx512_vpshrd_w_256;
1300  else if (VecWidth == 512 && Name[7] == 'q')
1301  IID = Intrinsic::x86_avx512_vpshrd_q_512;
1302  else if (VecWidth == 512 && Name[7] == 'd')
1303  IID = Intrinsic::x86_avx512_vpshrd_d_512;
1304  else if (VecWidth == 512 && Name[7] == 'w')
1305  IID = Intrinsic::x86_avx512_vpshrd_w_512;
1306  else
1307  llvm_unreachable("Unexpected intrinsic");
1308  } else
1309  return false;
1310 
1311  SmallVector<Value *, 4> Args(CI.arg_operands().begin(),
1312  CI.arg_operands().end());
1313  Args.pop_back();
1314  Args.pop_back();
1315  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1316  Args);
1317  unsigned NumArgs = CI.getNumArgOperands();
1318  Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
1319  CI.getArgOperand(NumArgs - 2));
1320  return true;
1321 }
1322 
1323 /// Upgrade comment in call to inline asm that represents an objc retain release
1324 /// marker.
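/// For example (illustrative), an asm string such as
/// "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue" has the '#'
/// before "marker" replaced with ';'.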
1325 void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
1326  size_t Pos;
1327  if (AsmStr->find("mov\tfp") == 0 &&
1328  AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
1329  (Pos = AsmStr->find("# marker")) != std::string::npos) {
1330  AsmStr->replace(Pos, 1, ";");
1331  }
1332  return;
1333 }
1334 
1335 /// Upgrade a call to an old intrinsic. All argument and return casting must be
1336 /// provided to seamlessly integrate with existing context.
1337 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
1338  Function *F = CI->getCalledFunction();
1339  LLVMContext &C = CI->getContext();
1340  IRBuilder<> Builder(C);
1341  Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
1342 
1343  assert(F && "Intrinsic call is not direct?");
1344 
1345  if (!NewFn) {
1346  // Get the Function's name.
1347  StringRef Name = F->getName();
1348 
1349  assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
1350  Name = Name.substr(5);
1351 
1352  bool IsX86 = Name.startswith("x86.");
1353  if (IsX86)
1354  Name = Name.substr(4);
1355  bool IsNVVM = Name.startswith("nvvm.");
1356  if (IsNVVM)
1357  Name = Name.substr(5);
1358 
1359  if (IsX86 && Name.startswith("sse4a.movnt.")) {
1360  Module *M = F->getParent();
1361  SmallVector<Metadata *, 1> Elts;
1362  Elts.push_back(
1363  ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
1364  MDNode *Node = MDNode::get(C, Elts);
1365 
1366  Value *Arg0 = CI->getArgOperand(0);
1367  Value *Arg1 = CI->getArgOperand(1);
1368 
1369  // Nontemporal (unaligned) store of the 0'th element of the float/double
1370  // vector.
1371  Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
1372  PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
1373  Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
1374  Value *Extract =
1375  Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
1376 
1377  StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1);
1378  SI->setMetadata(M->getMDKindID("nontemporal"), Node);
1379 
1380  // Remove intrinsic.
1381  CI->eraseFromParent();
1382  return;
1383  }
1384 
1385  if (IsX86 && (Name.startswith("avx.movnt.") ||
1386  Name.startswith("avx512.storent."))) {
1387  Module *M = F->getParent();
1388  SmallVector<Metadata *, 1> Elts;
1389  Elts.push_back(
1390  ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
1391  MDNode *Node = MDNode::get(C, Elts);
1392 
1393  Value *Arg0 = CI->getArgOperand(0);
1394  Value *Arg1 = CI->getArgOperand(1);
1395 
1396  // Convert the type of the pointer to a pointer to the stored type.
1397  Value *BC = Builder.CreateBitCast(Arg0,
1399  "cast");
1400  VectorType *VTy = cast<VectorType>(Arg1->getType());
1401  StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC,
1402  VTy->getBitWidth() / 8);
1403  SI->setMetadata(M->getMDKindID("nontemporal"), Node);
1404 
1405  // Remove intrinsic.
1406  CI->eraseFromParent();
1407  return;
1408  }
1409 
1410  if (IsX86 && Name == "sse2.storel.dq") {
1411  Value *Arg0 = CI->getArgOperand(0);
1412  Value *Arg1 = CI->getArgOperand(1);
1413 
1414  Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
1415  Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
1416  Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
1417  Value *BC = Builder.CreateBitCast(Arg0,
1419  "cast");
1420  Builder.CreateAlignedStore(Elt, BC, 1);
1421 
1422  // Remove intrinsic.
1423  CI->eraseFromParent();
1424  return;
1425  }
1426 
1427  if (IsX86 && (Name.startswith("sse.storeu.") ||
1428  Name.startswith("sse2.storeu.") ||
1429  Name.startswith("avx.storeu."))) {
1430  Value *Arg0 = CI->getArgOperand(0);
1431  Value *Arg1 = CI->getArgOperand(1);
1432 
1433  Arg0 = Builder.CreateBitCast(Arg0,
1435  "cast");
1436  Builder.CreateAlignedStore(Arg1, Arg0, 1);
1437 
1438  // Remove intrinsic.
1439  CI->eraseFromParent();
1440  return;
1441  }
1442 
1443  if (IsX86 && Name == "avx512.mask.store.ss") {
1444  Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
1445  UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
1446  Mask, false);
1447 
1448  // Remove intrinsic.
1449  CI->eraseFromParent();
1450  return;
1451  }
1452 
1453  if (IsX86 && (Name.startswith("avx512.mask.store"))) {
1454  // "avx512.mask.storeu." or "avx512.mask.store."
1455  bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
1456  UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
1457  CI->getArgOperand(2), Aligned);
1458 
1459  // Remove intrinsic.
1460  CI->eraseFromParent();
1461  return;
1462  }
1463 
1464  Value *Rep;
1465  // Upgrade packed integer vector compare intrinsics to compare instructions.
1466  if (IsX86 && (Name.startswith("sse2.pcmp") ||
1467  Name.startswith("avx2.pcmp"))) {
1468  // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
1469  bool CmpEq = Name[9] == 'e';
1470  Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
1471  CI->getArgOperand(0), CI->getArgOperand(1));
1472  Rep = Builder.CreateSExt(Rep, CI->getType(), "");
1473  } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
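    // broadcastmb2q/broadcastmw2d: zero-extend the k-mask to the destination
    // element width (i64 for an i8 mask, i32 for an i16 mask) and splat it
    // into every element of the result vector.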
1474  Type *ExtTy = Type::getInt32Ty(C);
1475  if (CI->getOperand(0)->getType()->isIntegerTy(8))
1476  ExtTy = Type::getInt64Ty(C);
1477  unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
1478  ExtTy->getPrimitiveSizeInBits();
1479  Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
1480  Rep = Builder.CreateVectorSplat(NumElts, Rep);
1481  } else if (IsX86 && (Name == "sse.sqrt.ss" ||
1482  Name == "sse2.sqrt.sd")) {
1483  Value *Vec = CI->getArgOperand(0);
1484  Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
1485  Function *Intr = Intrinsic::getDeclaration(F->getParent(),
1486  Intrinsic::sqrt, Elt0->getType());
1487  Elt0 = Builder.CreateCall(Intr, Elt0);
1488  Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
1489  } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
1490  Name.startswith("sse2.sqrt.p") ||
1491  Name.startswith("sse.sqrt.p"))) {
1492  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1493  Intrinsic::sqrt,
1494  CI->getType()),
1495  {CI->getArgOperand(0)});
1496  } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p") &&
1497  !Name.endswith("512"))) {
1498  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1499  Intrinsic::sqrt,
1500  CI->getType()),
1501  {CI->getArgOperand(0)});
1502  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1503  CI->getArgOperand(1));
1504  } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
1505  Name.startswith("avx512.ptestnm"))) {
1506  Value *Op0 = CI->getArgOperand(0);
1507  Value *Op1 = CI->getArgOperand(1);
1508  Value *Mask = CI->getArgOperand(2);
1509  Rep = Builder.CreateAnd(Op0, Op1);
1510  llvm::Type *Ty = Op0->getType();
1511  Value *Zero = llvm::Constant::getNullValue(Ty);
1512  ICmpInst::Predicate Pred =
1513  Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
1514  Rep = Builder.CreateICmp(Pred, Rep, Zero);
1515  unsigned NumElts = Op0->getType()->getVectorNumElements();
1516  Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask, NumElts);
1517  } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
1518  unsigned NumElts =
1519  CI->getArgOperand(1)->getType()->getVectorNumElements();
1520  Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
1521  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1522  CI->getArgOperand(1));
1523  } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
1524  unsigned NumElts = CI->getType()->getScalarSizeInBits();
1525  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
1526  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
1527  uint32_t Indices[64];
1528  for (unsigned i = 0; i != NumElts; ++i)
1529  Indices[i] = i;
1530 
1531  // First extract half of each vector. This gives better codegen than
1532  // doing it in a single shuffle.
1533  LHS = Builder.CreateShuffleVector(LHS, LHS,
1534  makeArrayRef(Indices, NumElts / 2));
1535  RHS = Builder.CreateShuffleVector(RHS, RHS,
1536  makeArrayRef(Indices, NumElts / 2));
1537  // Concat the vectors.
1538  // NOTE: Operands have to be swapped to match intrinsic definition.
1539  Rep = Builder.CreateShuffleVector(RHS, LHS,
1540  makeArrayRef(Indices, NumElts));
1541  Rep = Builder.CreateBitCast(Rep, CI->getType());
1542  } else if (IsX86 && Name == "avx512.kand.w") {
1543  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1544  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1545  Rep = Builder.CreateAnd(LHS, RHS);
1546  Rep = Builder.CreateBitCast(Rep, CI->getType());
1547  } else if (IsX86 && Name == "avx512.kandn.w") {
1548  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1549  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1550  LHS = Builder.CreateNot(LHS);
1551  Rep = Builder.CreateAnd(LHS, RHS);
1552  Rep = Builder.CreateBitCast(Rep, CI->getType());
1553  } else if (IsX86 && Name == "avx512.kor.w") {
1554  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1555  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1556  Rep = Builder.CreateOr(LHS, RHS);
1557  Rep = Builder.CreateBitCast(Rep, CI->getType());
1558  } else if (IsX86 && Name == "avx512.kxor.w") {
1559  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1560  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1561  Rep = Builder.CreateXor(LHS, RHS);
1562  Rep = Builder.CreateBitCast(Rep, CI->getType());
1563  } else if (IsX86 && Name == "avx512.kxnor.w") {
1564  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1565  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1566  LHS = Builder.CreateNot(LHS);
1567  Rep = Builder.CreateXor(LHS, RHS);
1568  Rep = Builder.CreateBitCast(Rep, CI->getType());
1569  } else if (IsX86 && Name == "avx512.knot.w") {
1570  Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1571  Rep = Builder.CreateNot(Rep);
1572  Rep = Builder.CreateBitCast(Rep, CI->getType());
1573  } else if (IsX86 &&
1574  (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
1575  Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1576  Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1577  Rep = Builder.CreateOr(LHS, RHS);
1578  Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
1579  Value *C;
1580  if (Name[14] == 'c')
1581  C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
1582  else
1583  C = ConstantInt::getNullValue(Builder.getInt16Ty());
1584  Rep = Builder.CreateICmpEQ(Rep, C);
1585  Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
1586  } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) {
1587  Type *I32Ty = Type::getInt32Ty(C);
1588  Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
1589  ConstantInt::get(I32Ty, 0));
1590  Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
1591  ConstantInt::get(I32Ty, 0));
1592  Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
1593  Builder.CreateFAdd(Elt0, Elt1),
1594  ConstantInt::get(I32Ty, 0));
1595  } else if (IsX86 && (Name == "sse.sub.ss" || Name == "sse2.sub.sd")) {
1596  Type *I32Ty = Type::getInt32Ty(C);
1597  Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
1598  ConstantInt::get(I32Ty, 0));
1599  Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
1600  ConstantInt::get(I32Ty, 0));
1601  Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
1602  Builder.CreateFSub(Elt0, Elt1),
1603  ConstantInt::get(I32Ty, 0));
1604  } else if (IsX86 && (Name == "sse.mul.ss" || Name == "sse2.mul.sd")) {
1605  Type *I32Ty = Type::getInt32Ty(C);
1606  Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
1607  ConstantInt::get(I32Ty, 0));
1608  Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
1609  ConstantInt::get(I32Ty, 0));
1610  Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
1611  Builder.CreateFMul(Elt0, Elt1),
1612  ConstantInt::get(I32Ty, 0));
1613  } else if (IsX86 && (Name == "sse.div.ss" || Name == "sse2.div.sd")) {
1614  Type *I32Ty = Type::getInt32Ty(C);
1615  Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
1616  ConstantInt::get(I32Ty, 0));
1617  Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
1618  ConstantInt::get(I32Ty, 0));
1619  Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
1620  Builder.CreateFDiv(Elt0, Elt1),
1621  ConstantInt::get(I32Ty, 0));
1622  } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
1623  // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
1624  bool CmpEq = Name[16] == 'e';
1625  Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
1626  } else if (IsX86 && Name.startswith("avx512.mask.cmp")) {
1627  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1628  Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
1629  } else if (IsX86 && Name.startswith("avx512.mask.ucmp")) {
1630  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1631  Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
1632  } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
1633  Name.startswith("avx512.cvtw2mask.") ||
1634  Name.startswith("avx512.cvtd2mask.") ||
1635  Name.startswith("avx512.cvtq2mask."))) {
1636  Value *Op = CI->getArgOperand(0);
1637  Value *Zero = Constant::getNullValue(Op->getType());
1638  Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
1639  Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr,
1640  Op->getType()->getVectorNumElements());
1641  } else if (IsX86 && (Name == "ssse3.pabs.b.128" ||
1642  Name == "ssse3.pabs.w.128" ||
1643  Name == "ssse3.pabs.d.128" ||
1644  Name.startswith("avx2.pabs") ||
1645  Name.startswith("avx512.mask.pabs"))) {
1646  Rep = upgradeAbs(Builder, *CI);
1647  } else if (IsX86 && (Name == "sse41.pmaxsb" ||
1648  Name == "sse2.pmaxs.w" ||
1649  Name == "sse41.pmaxsd" ||
1650  Name.startswith("avx2.pmaxs") ||
1651  Name.startswith("avx512.mask.pmaxs"))) {
1652  Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
1653  } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
1654  Name == "sse41.pmaxuw" ||
1655  Name == "sse41.pmaxud" ||
1656  Name.startswith("avx2.pmaxu") ||
1657  Name.startswith("avx512.mask.pmaxu"))) {
1658  Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
1659  } else if (IsX86 && (Name == "sse41.pminsb" ||
1660  Name == "sse2.pmins.w" ||
1661  Name == "sse41.pminsd" ||
1662  Name.startswith("avx2.pmins") ||
1663  Name.startswith("avx512.mask.pmins"))) {
1664  Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
1665  } else if (IsX86 && (Name == "sse2.pminu.b" ||
1666  Name == "sse41.pminuw" ||
1667  Name == "sse41.pminud" ||
1668  Name.startswith("avx2.pminu") ||
1669  Name.startswith("avx512.mask.pminu"))) {
1670  Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
1671  } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
1672  Name == "avx2.pmulu.dq" ||
1673  Name == "avx512.pmulu.dq.512" ||
1674  Name.startswith("avx512.mask.pmulu.dq."))) {
1675  Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
1676  } else if (IsX86 && (Name == "sse41.pmuldq" ||
1677  Name == "avx2.pmul.dq" ||
1678  Name == "avx512.pmul.dq.512" ||
1679  Name.startswith("avx512.mask.pmul.dq."))) {
1680  Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
1681  } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
1682  Name == "sse2.cvtsi2sd" ||
1683  Name == "sse.cvtsi642ss" ||
1684  Name == "sse2.cvtsi642sd")) {
1685  Rep = Builder.CreateSIToFP(CI->getArgOperand(1),
1686  CI->getType()->getVectorElementType());
1687  Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
1688  } else if (IsX86 && Name == "avx512.cvtusi2sd") {
1689  Rep = Builder.CreateUIToFP(CI->getArgOperand(1),
1690  CI->getType()->getVectorElementType());
1691  Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
1692  } else if (IsX86 && Name == "sse2.cvtss2sd") {
1693  Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
1694  Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType());
1695  Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
1696  } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
1697  Name == "sse2.cvtdq2ps" ||
1698  Name == "avx.cvtdq2.pd.256" ||
1699  Name == "avx.cvtdq2.ps.256" ||
1700  Name.startswith("avx512.mask.cvtdq2pd.") ||
1701  Name.startswith("avx512.mask.cvtudq2pd.") ||
1702  Name == "avx512.mask.cvtdq2ps.128" ||
1703  Name == "avx512.mask.cvtdq2ps.256" ||
1704  Name == "avx512.mask.cvtudq2ps.128" ||
1705  Name == "avx512.mask.cvtudq2ps.256" ||
1706  Name == "avx512.mask.cvtqq2pd.128" ||
1707  Name == "avx512.mask.cvtqq2pd.256" ||
1708  Name == "avx512.mask.cvtuqq2pd.128" ||
1709  Name == "avx512.mask.cvtuqq2pd.256" ||
1710  Name == "sse2.cvtps2pd" ||
1711  Name == "avx.cvt.ps2.pd.256" ||
1712  Name == "avx512.mask.cvtps2pd.128" ||
1713  Name == "avx512.mask.cvtps2pd.256")) {
1714  Type *DstTy = CI->getType();
1715  Rep = CI->getArgOperand(0);
1716 
1717  unsigned NumDstElts = DstTy->getVectorNumElements();
1718  if (NumDstElts < Rep->getType()->getVectorNumElements()) {
1719  assert(NumDstElts == 2 && "Unexpected vector size");
1720  uint32_t ShuffleMask[2] = { 0, 1 };
1721  Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask);
1722  }
1723 
1724  bool IsPS2PD = (StringRef::npos != Name.find("ps2"));
1725  bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
1726  if (IsPS2PD)
1727  Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
1728  else if (IsUnsigned)
1729  Rep = Builder.CreateUIToFP(Rep, DstTy, "cvt");
1730  else
1731  Rep = Builder.CreateSIToFP(Rep, DstTy, "cvt");
1732 
1733  if (CI->getNumArgOperands() == 3)
1734  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1735  CI->getArgOperand(1));
1736  } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) {
1737  Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
1738  CI->getArgOperand(1), CI->getArgOperand(2),
1739  /*Aligned*/false);
1740  } else if (IsX86 && (Name.startswith("avx512.mask.load."))) {
1741  Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
1742  CI->getArgOperand(1), CI->getArgOperand(2),
1743  /*Aligned*/true);
1744  } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
1745  Type *ResultTy = CI->getType();
1746  Type *PtrTy = ResultTy->getVectorElementType();
1747 
1748  // Cast the pointer to element type.
1749  Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
1750  llvm::PointerType::getUnqual(PtrTy));
1751 
1752  Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
1753  ResultTy->getVectorNumElements());
1754 
1755  Function *ELd = Intrinsic::getDeclaration(F->getParent(),
1756  Intrinsic::masked_expandload,
1757  ResultTy);
1758  Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
1759  } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
1760  Type *ResultTy = CI->getArgOperand(1)->getType();
1761  Type *PtrTy = ResultTy->getVectorElementType();
1762 
1763  // Cast the pointer to element type.
1764  Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
1765  llvm::PointerType::getUnqual(PtrTy));
1766 
1767  Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
1768  ResultTy->getVectorNumElements());
1769 
1770  Function *CSt = Intrinsic::getDeclaration(F->getParent(),
1771  Intrinsic::masked_compressstore,
1772  ResultTy);
1773  Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
1774  } else if (IsX86 && Name.startswith("xop.vpcom")) {
1775  Intrinsic::ID intID;
1776  if (Name.endswith("ub"))
1777  intID = Intrinsic::x86_xop_vpcomub;
1778  else if (Name.endswith("uw"))
1779  intID = Intrinsic::x86_xop_vpcomuw;
1780  else if (Name.endswith("ud"))
1781  intID = Intrinsic::x86_xop_vpcomud;
1782  else if (Name.endswith("uq"))
1783  intID = Intrinsic::x86_xop_vpcomuq;
1784  else if (Name.endswith("b"))
1785  intID = Intrinsic::x86_xop_vpcomb;
1786  else if (Name.endswith("w"))
1787  intID = Intrinsic::x86_xop_vpcomw;
1788  else if (Name.endswith("d"))
1789  intID = Intrinsic::x86_xop_vpcomd;
1790  else if (Name.endswith("q"))
1791  intID = Intrinsic::x86_xop_vpcomq;
1792  else
1793  llvm_unreachable("Unknown suffix");
1794 
1795  Name = Name.substr(9); // strip off "xop.vpcom"
1796  unsigned Imm;
1797  if (Name.startswith("lt"))
1798  Imm = 0;
1799  else if (Name.startswith("le"))
1800  Imm = 1;
1801  else if (Name.startswith("gt"))
1802  Imm = 2;
1803  else if (Name.startswith("ge"))
1804  Imm = 3;
1805  else if (Name.startswith("eq"))
1806  Imm = 4;
1807  else if (Name.startswith("ne"))
1808  Imm = 5;
1809  else if (Name.startswith("false"))
1810  Imm = 6;
1811  else if (Name.startswith("true"))
1812  Imm = 7;
1813  else
1814  llvm_unreachable("Unknown condition");
1815 
1816  Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
1817  Rep =
1818  Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
1819  Builder.getInt8(Imm)});
1820  } else if (IsX86 && Name.startswith("xop.vpcmov")) {
1821  Value *Sel = CI->getArgOperand(2);
1822  Value *NotSel = Builder.CreateNot(Sel);
1823  Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
1824  Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
1825  Rep = Builder.CreateOr(Sel0, Sel1);
1826  } else if (IsX86 && Name == "sse42.crc32.64.8") {
1827  Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
1828  Intrinsic::x86_sse42_crc32_32_8);
1829  Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
1830  Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
1831  Rep = Builder.CreateZExt(Rep, CI->getType(), "");
1832  } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
1833  Name.startswith("avx512.vbroadcast.s"))) {
1834  // Replace broadcasts with a series of insertelements.
1835  Type *VecTy = CI->getType();
1836  Type *EltTy = VecTy->getVectorElementType();
1837  unsigned EltNum = VecTy->getVectorNumElements();
1838  Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
1839  EltTy->getPointerTo());
1840  Value *Load = Builder.CreateLoad(EltTy, Cast);
1841  Type *I32Ty = Type::getInt32Ty(C);
1842  Rep = UndefValue::get(VecTy);
1843  for (unsigned I = 0; I < EltNum; ++I)
1844  Rep = Builder.CreateInsertElement(Rep, Load,
1845  ConstantInt::get(I32Ty, I));
1846  } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
1847  Name.startswith("sse41.pmovzx") ||
1848  Name.startswith("avx2.pmovsx") ||
1849  Name.startswith("avx2.pmovzx") ||
1850  Name.startswith("avx512.mask.pmovsx") ||
1851  Name.startswith("avx512.mask.pmovzx"))) {
1852  VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
1853  VectorType *DstTy = cast<VectorType>(CI->getType());
1854  unsigned NumDstElts = DstTy->getNumElements();
1855 
1856  // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
1857  SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
1858  for (unsigned i = 0; i != NumDstElts; ++i)
1859  ShuffleMask[i] = i;
1860 
1861  Value *SV = Builder.CreateShuffleVector(
1862  CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);
1863 
1864  bool DoSext = (StringRef::npos != Name.find("pmovsx"));
1865  Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
1866  : Builder.CreateZExt(SV, DstTy);
1867  // If there are 3 arguments, it's a masked intrinsic so we need a select.
1868  if (CI->getNumArgOperands() == 3)
1869  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1870  CI->getArgOperand(1));
1871  } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
1872  Name == "avx2.vbroadcasti128")) {
1873  // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
1874  Type *EltTy = CI->getType()->getVectorElementType();
1875  unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
1876  Type *VT = VectorType::get(EltTy, NumSrcElts);
1877  Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
1878  PointerType::getUnqual(VT));
1879  Value *Load = Builder.CreateAlignedLoad(Op, 1);
1880  if (NumSrcElts == 2)
1881  Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
1882  { 0, 1, 0, 1 });
1883  else
1884  Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
1885  { 0, 1, 2, 3, 0, 1, 2, 3 });
1886  } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
1887  Name.startswith("avx512.mask.shuf.f"))) {
1888  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1889  Type *VT = CI->getType();
1890  unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
1891  unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
1892  unsigned ControlBitsMask = NumLanes - 1;
1893  unsigned NumControlBits = NumLanes / 2;
1894  SmallVector<uint32_t, 8> ShuffleMask(0);
1895 
1896  for (unsigned l = 0; l != NumLanes; ++l) {
1897  unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
1898  // We actually need the other source.
1899  if (l >= NumLanes / 2)
1900  LaneMask += NumLanes;
1901  for (unsigned i = 0; i != NumElementsInLane; ++i)
1902  ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
1903  }
1904  Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
1905  CI->getArgOperand(1), ShuffleMask);
1906  Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
1907  CI->getArgOperand(3));
1908  } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
1909  Name.startswith("avx512.mask.broadcasti"))) {
1910  unsigned NumSrcElts =
1911  CI->getArgOperand(0)->getType()->getVectorNumElements();
1912  unsigned NumDstElts = CI->getType()->getVectorNumElements();
1913 
1914  SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
1915  for (unsigned i = 0; i != NumDstElts; ++i)
1916  ShuffleMask[i] = i % NumSrcElts;
1917 
1918  Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
1919  CI->getArgOperand(0),
1920  ShuffleMask);
1921  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1922  CI->getArgOperand(1));
1923  } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
1924  Name.startswith("avx2.vbroadcast") ||
1925  Name.startswith("avx512.pbroadcast") ||
1926  Name.startswith("avx512.mask.broadcast.s"))) {
1927  // Replace vp?broadcasts with a vector shuffle.
1928  Value *Op = CI->getArgOperand(0);
1929  unsigned NumElts = CI->getType()->getVectorNumElements();
1930  Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
1931  Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
1932  Constant::getNullValue(MaskTy));
1933 
1934  if (CI->getNumArgOperands() == 3)
1935  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1936  CI->getArgOperand(1));
1937  } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
1938  Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
1939  CI->getArgOperand(1),
1940  CI->getArgOperand(2),
1941  CI->getArgOperand(3),
1942  CI->getArgOperand(4),
1943  false);
1944  } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
1945  Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
1946  CI->getArgOperand(1),
1947  CI->getArgOperand(2),
1948  CI->getArgOperand(3),
1949  CI->getArgOperand(4),
1950  true);
1951  } else if (IsX86 && (Name == "sse2.psll.dq" ||
1952  Name == "avx2.psll.dq")) {
1953  // 128/256-bit shift left specified in bits.
1954  unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1955  Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
1956  Shift / 8); // Shift is in bits.
1957  } else if (IsX86 && (Name == "sse2.psrl.dq" ||
1958  Name == "avx2.psrl.dq")) {
1959  // 128/256-bit shift right specified in bits.
1960  unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1961  Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
1962  Shift / 8); // Shift is in bits.
1963  } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
1964  Name == "avx2.psll.dq.bs" ||
1965  Name == "avx512.psll.dq.512")) {
1966  // 128/256/512-bit shift left specified in bytes.
1967  unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1968  Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
1969  } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
1970  Name == "avx2.psrl.dq.bs" ||
1971  Name == "avx512.psrl.dq.512")) {
1972  // 128/256/512-bit shift right specified in bytes.
1973  unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1974  Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
1975  } else if (IsX86 && (Name == "sse41.pblendw" ||
1976  Name.startswith("sse41.blendp") ||
1977  Name.startswith("avx.blend.p") ||
1978  Name == "avx2.pblendw" ||
1979  Name.startswith("avx2.pblendd."))) {
1980  Value *Op0 = CI->getArgOperand(0);
1981  Value *Op1 = CI->getArgOperand(1);
1982  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1983  VectorType *VecTy = cast<VectorType>(CI->getType());
1984  unsigned NumElts = VecTy->getNumElements();
1985 
1986  SmallVector<uint32_t, 16> Idxs(NumElts);
1987  for (unsigned i = 0; i != NumElts; ++i)
1988  Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
1989 
1990  Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
1991  } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
1992  Name == "avx2.vinserti128" ||
1993  Name.startswith("avx512.mask.insert"))) {
1994  Value *Op0 = CI->getArgOperand(0);
1995  Value *Op1 = CI->getArgOperand(1);
1996  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1997  unsigned DstNumElts = CI->getType()->getVectorNumElements();
1998  unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
1999  unsigned Scale = DstNumElts / SrcNumElts;
2000 
2001  // Mask off the high bits of the immediate value; hardware ignores those.
2002  Imm = Imm % Scale;
2003 
2004  // Extend the second operand into a vector the size of the destination.
2005  Value *UndefV = UndefValue::get(Op1->getType());
2006  SmallVector<uint32_t, 8> Idxs(DstNumElts);
2007  for (unsigned i = 0; i != SrcNumElts; ++i)
2008  Idxs[i] = i;
2009  for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2010  Idxs[i] = SrcNumElts;
2011  Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
2012 
2013  // Insert the second operand into the first operand.
2014 
2015  // Note that there is no guarantee that instruction lowering will actually
2016  // produce a vinsertf128 instruction for the created shuffles. In
2017  // particular, the 0 immediate case involves no lane changes, so it can
2018  // be handled as a blend.
2019 
2020  // Example of shuffle mask for 32-bit elements:
2021  // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2022  // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
2023 
2024  // First fill with the identity mask.
2025  for (unsigned i = 0; i != DstNumElts; ++i)
2026  Idxs[i] = i;
2027  // Then replace the elements where we need to insert.
2028  for (unsigned i = 0; i != SrcNumElts; ++i)
2029  Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2030  Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2031 
2032  // If the intrinsic has a mask operand, handle that.
2033  if (CI->getNumArgOperands() == 5)
2034  Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2035  CI->getArgOperand(3));
2036  } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2037  Name == "avx2.vextracti128" ||
2038  Name.startswith("avx512.mask.vextract"))) {
2039  Value *Op0 = CI->getArgOperand(0);
2040  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2041  unsigned DstNumElts = CI->getType()->getVectorNumElements();
2042  unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
2043  unsigned Scale = SrcNumElts / DstNumElts;
2044 
2045  // Mask off the high bits of the immediate value; hardware ignores those.
2046  Imm = Imm % Scale;
2047 
2048  // Get indexes for the subvector of the input vector.
2049  SmallVector<uint32_t, 8> Idxs(DstNumElts);
2050  for (unsigned i = 0; i != DstNumElts; ++i) {
2051  Idxs[i] = i + (Imm * DstNumElts);
2052  }
2053  Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2054 
2055  // If the intrinsic has a mask operand, handle that.
2056  if (CI->getNumArgOperands() == 4)
2057  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2058  CI->getArgOperand(2));
2059  } else if (!IsX86 && Name == "stackprotectorcheck") {
2060  Rep = nullptr;
2061  } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
2062  Name.startswith("avx512.mask.perm.di."))) {
2063  Value *Op0 = CI->getArgOperand(0);
2064  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2065  VectorType *VecTy = cast<VectorType>(CI->getType());
2066  unsigned NumElts = VecTy->getNumElements();
2067 
2068  SmallVector<uint32_t, 8> Idxs(NumElts);
2069  for (unsigned i = 0; i != NumElts; ++i)
2070  Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
2071 
2072  Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2073 
2074  if (CI->getNumArgOperands() == 4)
2075  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2076  CI->getArgOperand(2));
2077  } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
2078  Name == "avx2.vperm2i128")) {
2079  // The immediate permute control byte looks like this:
2080  // [1:0] - select 128 bits from sources for low half of destination
2081  // [2] - ignore
2082  // [3] - zero low half of destination
2083  // [5:4] - select 128 bits from sources for high half of destination
2084  // [6] - ignore
2085  // [7] - zero high half of destination
2086 
2087  uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2088 
2089  unsigned NumElts = CI->getType()->getVectorNumElements();
2090  unsigned HalfSize = NumElts / 2;
2091  SmallVector<uint32_t, 8> ShuffleMask(NumElts);
2092 
2093  // Determine which operand(s) are actually in use for this instruction.
2094  Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2095  Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2096 
2097  // If needed, replace operands based on zero mask.
2098  V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
2099  V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
2100 
2101  // Permute low half of result.
2102  unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
2103  for (unsigned i = 0; i < HalfSize; ++i)
2104  ShuffleMask[i] = StartIndex + i;
2105 
2106  // Permute high half of result.
2107  StartIndex = (Imm & 0x10) ? HalfSize : 0;
2108  for (unsigned i = 0; i < HalfSize; ++i)
2109  ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
2110 
2111  Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2112 
2113  } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
2114  Name == "sse2.pshuf.d" ||
2115  Name.startswith("avx512.mask.vpermil.p") ||
2116  Name.startswith("avx512.mask.pshuf.d."))) {
2117  Value *Op0 = CI->getArgOperand(0);
2118  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2119  VectorType *VecTy = cast<VectorType>(CI->getType());
2120  unsigned NumElts = VecTy->getNumElements();
2121  // Calculate the size of each index in the immediate.
2122  unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
2123  unsigned IdxMask = ((1 << IdxSize) - 1);
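    // e.g. for 32-bit elements IdxSize is 2 and IdxMask is 0x3, so each
    // 2-bit immediate field picks one of the 4 elements in a 128-bit lane.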
2124 
2125  SmallVector<uint32_t, 8> Idxs(NumElts);
2126  // Lookup the bits for this element, wrapping around the immediate every
2127  // 8 bits. Elements are grouped into sets of 2 or 4 elements so we need
2128  // to offset by the first index of each group.
2129  for (unsigned i = 0; i != NumElts; ++i)
2130  Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
2131 
2132  Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2133 
2134  if (CI->getNumArgOperands() == 4)
2135  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2136  CI->getArgOperand(2));
2137  } else if (IsX86 && (Name == "sse2.pshufl.w" ||
2138  Name.startswith("avx512.mask.pshufl.w."))) {
2139  Value *Op0 = CI->getArgOperand(0);
2140  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2141  unsigned NumElts = CI->getType()->getVectorNumElements();
2142 
2143  SmallVector<uint32_t, 16> Idxs(NumElts);
2144  for (unsigned l = 0; l != NumElts; l += 8) {
2145  for (unsigned i = 0; i != 4; ++i)
2146  Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
2147  for (unsigned i = 4; i != 8; ++i)
2148  Idxs[i + l] = i + l;
2149  }
2150 
2151  Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2152 
2153  if (CI->getNumArgOperands() == 4)
2154  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2155  CI->getArgOperand(2));
2156  } else if (IsX86 && (Name == "sse2.pshufh.w" ||
2157  Name.startswith("avx512.mask.pshufh.w."))) {
2158  Value *Op0 = CI->getArgOperand(0);
2159  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2160  unsigned NumElts = CI->getType()->getVectorNumElements();
2161 
2162  SmallVector<uint32_t, 16> Idxs(NumElts);
2163  for (unsigned l = 0; l != NumElts; l += 8) {
2164  for (unsigned i = 0; i != 4; ++i)
2165  Idxs[i + l] = i + l;
2166  for (unsigned i = 0; i != 4; ++i)
2167  Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
2168  }
2169 
2170  Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2171 
2172  if (CI->getNumArgOperands() == 4)
2173  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2174  CI->getArgOperand(2));
2175  } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
2176  Value *Op0 = CI->getArgOperand(0);
2177  Value *Op1 = CI->getArgOperand(1);
2178  unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2179  unsigned NumElts = CI->getType()->getVectorNumElements();
2180 
2181  unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2182  unsigned HalfLaneElts = NumLaneElts / 2;
2183 
2184  SmallVector<uint32_t, 16> Idxs(NumElts);
2185  for (unsigned i = 0; i != NumElts; ++i) {
2186  // Base index is the starting element of the lane.
2187  Idxs[i] = i - (i % NumLaneElts);
2188  // If we are half way through the lane switch to the other source.
2189  if ((i % NumLaneElts) >= HalfLaneElts)
2190  Idxs[i] += NumElts;
2191  // Now select the specific element by adding HalfLaneElts bits from
2192  // the immediate, wrapping around the immediate every 8 bits.
2193  Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
2194  }
2195 
2196  Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2197 
2198  Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2199  CI->getArgOperand(3));
2200  } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
2201  Name.startswith("avx512.mask.movshdup") ||
2202  Name.startswith("avx512.mask.movsldup"))) {
2203  Value *Op0 = CI->getArgOperand(0);
2204  unsigned NumElts = CI->getType()->getVectorNumElements();
2205  unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2206 
2207  unsigned Offset = 0;
2208  if (Name.startswith("avx512.mask.movshdup."))
2209  Offset = 1;
2210 
2211  SmallVector<uint32_t, 16> Idxs(NumElts);
2212  for (unsigned l = 0; l != NumElts; l += NumLaneElts)
2213  for (unsigned i = 0; i != NumLaneElts; i += 2) {
2214  Idxs[i + l + 0] = i + l + Offset;
2215  Idxs[i + l + 1] = i + l + Offset;
2216  }
2217 
2218  Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2219 
2220  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2221  CI->getArgOperand(1));
2222  } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
2223  Name.startswith("avx512.mask.unpckl."))) {
2224  Value *Op0 = CI->getArgOperand(0);
2225  Value *Op1 = CI->getArgOperand(1);
2226  int NumElts = CI->getType()->getVectorNumElements();
2227  int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2228 
2229  SmallVector<uint32_t, 64> Idxs(NumElts);
2230  for (int l = 0; l != NumElts; l += NumLaneElts)
2231  for (int i = 0; i != NumLaneElts; ++i)
2232  Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
2233 
2234  Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2235 
2236  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2237  CI->getArgOperand(2));
2238  } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
2239  Name.startswith("avx512.mask.unpckh."))) {
2240  Value *Op0 = CI->getArgOperand(0);
2241  Value *Op1 = CI->getArgOperand(1);
2242  int NumElts = CI->getType()->getVectorNumElements();
2243  int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2244 
2245  SmallVector<uint32_t, 64> Idxs(NumElts);
2246  for (int l = 0; l != NumElts; l += NumLaneElts)
2247  for (int i = 0; i != NumLaneElts; ++i)
2248  Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
2249 
2250  Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2251 
2252  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2253  CI->getArgOperand(2));
2254  } else if (IsX86 && Name.startswith("avx512.mask.pand.")) {
2255  Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1));
2256  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2257  CI->getArgOperand(2));
2258  } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) {
2259  Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)),
2260  CI->getArgOperand(1));
2261  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2262  CI->getArgOperand(2));
2263  } else if (IsX86 && Name.startswith("avx512.mask.por.")) {
2264  Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1));
2265  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2266  CI->getArgOperand(2));
2267  } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) {
2268  Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1));
2269  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2270  CI->getArgOperand(2));
2271  } else if (IsX86 && Name.startswith("avx512.mask.and.")) {
2272  VectorType *FTy = cast<VectorType>(CI->getType());
2273  VectorType *ITy = VectorType::getInteger(FTy);
2274  Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
2275  Builder.CreateBitCast(CI->getArgOperand(1), ITy));
2276  Rep = Builder.CreateBitCast(Rep, FTy);
2277  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2278  CI->getArgOperand(2));
2279  } else if (IsX86 && Name.startswith("avx512.mask.andn.")) {
2280  VectorType *FTy = cast<VectorType>(CI->getType());
2281  VectorType *ITy = VectorType::getInteger(FTy);
2282  Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
2283  Rep = Builder.CreateAnd(Rep,
2284  Builder.CreateBitCast(CI->getArgOperand(1), ITy));
2285  Rep = Builder.CreateBitCast(Rep, FTy);
2286  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2287  CI->getArgOperand(2));
2288  } else if (IsX86 && Name.startswith("avx512.mask.or.")) {
2289  VectorType *FTy = cast<VectorType>(CI->getType());
2290  VectorType *ITy = VectorType::getInteger(FTy);
2291  Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
2292  Builder.CreateBitCast(CI->getArgOperand(1), ITy));
2293  Rep = Builder.CreateBitCast(Rep, FTy);
2294  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2295  CI->getArgOperand(2));
2296  } else if (IsX86 && Name.startswith("avx512.mask.xor.")) {
2297  VectorType *FTy = cast<VectorType>(CI->getType());
2298  VectorType *ITy = VectorType::getInteger(FTy);
2299  Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
2300  Builder.CreateBitCast(CI->getArgOperand(1), ITy));
2301  Rep = Builder.CreateBitCast(Rep, FTy);
2302  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2303  CI->getArgOperand(2));
2304  } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
2305  Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
2306  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2307  CI->getArgOperand(2));
2308  } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
2309  Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
2310  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2311  CI->getArgOperand(2));
2312  } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
2313  Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
2314  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2315  CI->getArgOperand(2));
2316  } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
2317  if (Name.endswith(".512")) {
2318  Intrinsic::ID IID;
2319  if (Name[17] == 's')
2320  IID = Intrinsic::x86_avx512_add_ps_512;
2321  else
2322  IID = Intrinsic::x86_avx512_add_pd_512;
2323 
2324  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2325  { CI->getArgOperand(0), CI->getArgOperand(1),
2326  CI->getArgOperand(4) });
2327  } else {
2328  Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
2329  }
2330  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2331  CI->getArgOperand(2));
2332  } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
2333  if (Name.endswith(".512")) {
2334  Intrinsic::ID IID;
2335  if (Name[17] == 's')
2336  IID = Intrinsic::x86_avx512_div_ps_512;
2337  else
2338  IID = Intrinsic::x86_avx512_div_pd_512;
2339 
2340  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2341  { CI->getArgOperand(0), CI->getArgOperand(1),
2342  CI->getArgOperand(4) });
2343  } else {
2344  Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
2345  }
2346  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2347  CI->getArgOperand(2));
2348  } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
2349  if (Name.endswith(".512")) {
2350  Intrinsic::ID IID;
2351  if (Name[17] == 's')
2352  IID = Intrinsic::x86_avx512_mul_ps_512;
2353  else
2354  IID = Intrinsic::x86_avx512_mul_pd_512;
2355 
2356  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2357  { CI->getArgOperand(0), CI->getArgOperand(1),
2358  CI->getArgOperand(4) });
2359  } else {
2360  Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
2361  }
2362  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2363  CI->getArgOperand(2));
2364  } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
2365  if (Name.endswith(".512")) {
2366  Intrinsic::ID IID;
2367  if (Name[17] == 's')
2368  IID = Intrinsic::x86_avx512_sub_ps_512;
2369  else
2370  IID = Intrinsic::x86_avx512_sub_pd_512;
2371 
2372  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2373  { CI->getArgOperand(0), CI->getArgOperand(1),
2374  CI->getArgOperand(4) });
2375  } else {
2376  Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
2377  }
2378  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2379  CI->getArgOperand(2));
2380  } else if (IsX86 && Name.startswith("avx512.mask.max.p") &&
2381  Name.drop_front(18) == ".512") {
2382  Intrinsic::ID IID;
2383  if (Name[17] == 's')
2384  IID = Intrinsic::x86_avx512_max_ps_512;
2385  else
2386  IID = Intrinsic::x86_avx512_max_pd_512;
2387 
2388  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2389  { CI->getArgOperand(0), CI->getArgOperand(1),
2390  CI->getArgOperand(4) });
2391  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2392  CI->getArgOperand(2));
2393  } else if (IsX86 && Name.startswith("avx512.mask.min.p") &&
2394  Name.drop_front(18) == ".512") {
2395  Intrinsic::ID IID;
2396  if (Name[17] == 's')
2397  IID = Intrinsic::x86_avx512_min_ps_512;
2398  else
2399  IID = Intrinsic::x86_avx512_min_pd_512;
2400 
2401  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2402  { CI->getArgOperand(0), CI->getArgOperand(1),
2403  CI->getArgOperand(4) });
2404  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2405  CI->getArgOperand(2));
2406  } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
2407  Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2408  Intrinsic::ctlz,
2409  CI->getType()),
2410  { CI->getArgOperand(0), Builder.getInt1(false) });
2411  Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2412  CI->getArgOperand(1));
2413  } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
2414  bool IsImmediate = Name[16] == 'i' ||
2415  (Name.size() > 18 && Name[18] == 'i');
2416  bool IsVariable = Name[16] == 'v';
2417  char Size = Name[16] == '.' ? Name[17] :
2418  Name[17] == '.' ? Name[18] :
2419  Name[18] == '.' ? Name[19] :
2420  Name[20];
2421 
2422  Intrinsic::ID IID;
2423  if (IsVariable && Name[17] != '.') {
2424  if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
2425  IID = Intrinsic::x86_avx2_psllv_q;
2426  else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
2427  IID = Intrinsic::x86_avx2_psllv_q_256;
2428  else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
2429  IID = Intrinsic::x86_avx2_psllv_d;
2430  else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
2431  IID = Intrinsic::x86_avx2_psllv_d_256;
2432  else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
2433  IID = Intrinsic::x86_avx512_psllv_w_128;
2434  else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
2435  IID = Intrinsic::x86_avx512_psllv_w_256;
2436  else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
2437  IID = Intrinsic::x86_avx512_psllv_w_512;
2438  else
2439  llvm_unreachable("Unexpected size");
2440  } else if (Name.endswith(".128")) {
2441  if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
2442  IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
2443  : Intrinsic::x86_sse2_psll_d;
2444  else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
2445  IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
2446  : Intrinsic::x86_sse2_psll_q;
2447  else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
2448  IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
2449  : Intrinsic::x86_sse2_psll_w;
2450  else
2451  llvm_unreachable("Unexpected size");
2452  } else if (Name.endswith(".256")) {
2453  if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
2454  IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
2455  : Intrinsic::x86_avx2_psll_d;
2456  else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
2457  IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
2458  : Intrinsic::x86_avx2_psll_q;
2459  else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
2460  IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
2461  : Intrinsic::x86_avx2_psll_w;
2462  else
2463  llvm_unreachable("Unexpected size");
2464  } else {
2465  if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
2466  IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
2467  IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
2468  Intrinsic::x86_avx512_psll_d_512;
2469  else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
2470  IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
2471  IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
2472  Intrinsic::x86_avx512_psll_q_512;
2473  else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
2474  IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
2475  : Intrinsic::x86_avx512_psll_w_512;
2476  else
2477  llvm_unreachable("Unexpected size");
2478  }
2479 
2480  Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
2481  } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
2482  bool IsImmediate = Name[16] == 'i' ||
2483  (Name.size() > 18 && Name[18] == 'i');
2484  bool IsVariable = Name[16] == 'v';
2485  char Size = Name[16] == '.' ? Name[17] :
2486  Name[17] == '.' ? Name[18] :
2487  Name[18] == '.' ? Name[19] :
2488  Name[20];
2489 
2490  Intrinsic::ID IID;
2491  if (IsVariable && Name[17] != '.') {
2492  if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
2493  IID = Intrinsic::x86_avx2_psrlv_q;
2494  else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
2495  IID = Intrinsic::x86_avx2_psrlv_q_256;
2496  else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
2497  IID = Intrinsic::x86_avx2_psrlv_d;
2498  else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
2499  IID = Intrinsic::x86_avx2_psrlv_d_256;
2500  else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
2501  IID = Intrinsic::x86_avx512_psrlv_w_128;
2502  else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
2503  IID = Intrinsic::x86_avx512_psrlv_w_256;
2504  else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
2505  IID = Intrinsic::x86_avx512_psrlv_w_512;
2506  else
2507  llvm_unreachable("Unexpected size");
2508  } else if (Name.endswith(".128")) {
2509  if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
2510  IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
2511  : Intrinsic::x86_sse2_psrl_d;
2512  else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
2513  IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
2514  : Intrinsic::x86_sse2_psrl_q;
2515  else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
2516  IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
2517  : Intrinsic::x86_sse2_psrl_w;
2518  else
2519  llvm_unreachable("Unexpected size");
2520  } else if (Name.endswith(".256")) {
2521  if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
2522  IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
2523  : Intrinsic::x86_avx2_psrl_d;
2524  else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
2525  IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
2526  : Intrinsic::x86_avx2_psrl_q;
2527  else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
2528  IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
2529  : Intrinsic::x86_avx2_psrl_w;
2530  else
2531  llvm_unreachable("Unexpected size");
2532  } else {
2533  if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
2534  IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
2535  IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
2536  Intrinsic::x86_avx512_psrl_d_512;
2537  else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
2538  IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
2539  IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
2540  Intrinsic::x86_avx512_psrl_q_512;
2541  else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
2542  IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
2543  : Intrinsic::x86_avx512_psrl_w_512;
2544  else
2545  llvm_unreachable("Unexpected size");
2546  }
2547 
2548  Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
2549  } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
2550  bool IsImmediate = Name[16] == 'i' ||
2551  (Name.size() > 18 && Name[18] == 'i');
2552  bool IsVariable = Name[16] == 'v';
2553  char Size = Name[16] == '.' ? Name[17] :
2554  Name[17] == '.' ? Name[18] :
2555  Name[18] == '.' ? Name[19] :
2556  Name[20];
2557 
2558  Intrinsic::ID IID;
2559  if (IsVariable && Name[17] != '.') {
2560  if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
2561  IID = Intrinsic::x86_avx2_psrav_d;
2562  else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
2563  IID = Intrinsic::x86_avx2_psrav_d_256;
2564  else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
2565  IID = Intrinsic::x86_avx512_psrav_w_128;
2566  else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
2567  IID = Intrinsic::x86_avx512_psrav_w_256;
2568  else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
2569  IID = Intrinsic::x86_avx512_psrav_w_512;
2570  else
2571  llvm_unreachable("Unexpected size");
2572  } else if (Name.endswith(".128")) {
2573  if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
2574  IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
2575  : Intrinsic::x86_sse2_psra_d;
2576  else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
2577  IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
2578  IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
2579  Intrinsic::x86_avx512_psra_q_128;
2580  else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
2581  IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
2582  : Intrinsic::x86_sse2_psra_w;
2583  else
2584  llvm_unreachable("Unexpected size");
2585  } else if (Name.endswith(".256")) {
2586  if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
2587  IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
2588  : Intrinsic::x86_avx2_psra_d;
2589  else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
2590  IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
2591  IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
2592  Intrinsic::x86_avx512_psra_q_256;
2593  else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
2594  IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
2595  : Intrinsic::x86_avx2_psra_w;
2596  else
2597  llvm_unreachable("Unexpected size");
2598  } else {
2599  if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
2600  IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
2601  IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
2602  Intrinsic::x86_avx512_psra_d_512;
2603  else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
2604  IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
2605  IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
2606  Intrinsic::x86_avx512_psra_q_512;
2607  else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
2608  IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
2609  : Intrinsic::x86_avx512_psra_w_512;
2610  else
2611  llvm_unreachable("Unexpected size");
2612  }
2613 
2614  Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
2615  } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
2616  Rep = upgradeMaskedMove(Builder, *CI);
2617  } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
2618  Rep = UpgradeMaskToInt(Builder, *CI);
2619  } else if (IsX86 && Name.endswith(".movntdqa")) {
2620  Module *M = F->getParent();
2621  MDNode *Node = MDNode::get(
2622  C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2623 
2624  Value *Ptr = CI->getArgOperand(0);
2625  VectorType *VTy = cast<VectorType>(CI->getType());
2626 
2627  // Convert the type of the pointer to a pointer to the stored type.
2628  Value *BC =
2629  Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
2630  LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8);
2631  LI->setMetadata(M->getMDKindID("nontemporal"), Node);
2632  Rep = LI;
2633  } else if (IsX86 &&
2634  (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") ||
2635  Name.startswith("avx512.mask.pavg"))) {
2636  // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w,
2637  // llvm.x86.avx512.mask.pavg.b/w
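    // pavg is the rounding average: widen to the next wider element type,
    // compute (A + B + 1) >> 1, and truncate back so the intermediate sum
    // cannot overflow.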
2638  Value *A = CI->getArgOperand(0);
2639  Value *B = CI->getArgOperand(1);
2640  VectorType *ZextType = VectorType::getExtendedElementVectorType(
2641  cast<VectorType>(A->getType()));
2642  Value *ExtendedA = Builder.CreateZExt(A, ZextType);
2643  Value *ExtendedB = Builder.CreateZExt(B, ZextType);
2644  Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB);
2645  Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1));
2646  Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1));
2647  Rep = Builder.CreateTrunc(ShiftR, A->getType());
2648  if (CI->getNumArgOperands() > 2) {
2649  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2650  CI->getArgOperand(2));
2651  }
2652  } else if (IsX86 && Name.startswith("fma.vfmsub")) {
2653  // Handle FMSUB and FMSUBADD.
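    // Both lower to the corresponding FMADD/FMADDSUB intrinsic with the
    // third operand negated.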
2654  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2655  unsigned EltWidth = CI->getType()->getScalarSizeInBits();
2656  Intrinsic::ID IID;
2657  if (Name[10] == '.' && Name[11] == 'p') {
2658  // Packed FMSUB
2659  if (VecWidth == 128 && EltWidth == 32)
2660  IID = Intrinsic::x86_fma_vfmadd_ps;
2661  else if (VecWidth == 128 && EltWidth == 64)
2662  IID = Intrinsic::x86_fma_vfmadd_pd;
2663  else if (VecWidth == 256 && EltWidth == 32)
2664  IID = Intrinsic::x86_fma_vfmadd_ps_256;
2665  else if (VecWidth == 256 && EltWidth == 64)
2666  IID = Intrinsic::x86_fma_vfmadd_pd_256;
2667  else
2668  llvm_unreachable("Unexpected intrinsic");
2669  } else if (Name[10] == '.' && Name[11] == 's') {
2670  // Scalar FMSUB
2671  if (EltWidth == 32)
2672  IID = Intrinsic::x86_fma_vfmadd_ss;
2673  else if (EltWidth == 64)
2674  IID = Intrinsic::x86_fma_vfmadd_sd;
2675  else
2676  llvm_unreachable("Unexpected intrinsic");
2677  } else {
2678  // FMSUBADD
2679  if (VecWidth == 128 && EltWidth == 32)
2680  IID = Intrinsic::x86_fma_vfmaddsub_ps;
2681  else if (VecWidth == 128 && EltWidth == 64)
2682  IID = Intrinsic::x86_fma_vfmaddsub_pd;
2683  else if (VecWidth == 256 && EltWidth == 32)
2684  IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
2685  else if (VecWidth == 256 && EltWidth == 64)
2686  IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
2687  else
2688  llvm_unreachable("Unexpected intrinsic");
2689  }
2690  Value *Arg2 = Builder.CreateFNeg(CI->getArgOperand(2));
2691  Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), Arg2 };
2692  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2693  Ops);
2694  } else if (IsX86 && (Name.startswith("fma.vfnmadd.") ||
2695  Name.startswith("fma.vfnmsub."))) {
2696  Value *Arg0 = CI->getArgOperand(0);
2697  Value *Arg1 = CI->getArgOperand(1);
2698  Value *Arg2 = CI->getArgOperand(2);
2699  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2700  unsigned EltWidth = CI->getType()->getScalarSizeInBits();
2701  Intrinsic::ID IID;
2702  if (Name[12] == 'p') {
2703  // Packed FNMADD/FNMSUB
2704  Arg0 = Builder.CreateFNeg(Arg0);
2705  if (VecWidth == 128 && EltWidth == 32)
2706  IID = Intrinsic::x86_fma_vfmadd_ps;
2707  else if (VecWidth == 128 && EltWidth == 64)
2708  IID = Intrinsic::x86_fma_vfmadd_pd;
2709  else if (VecWidth == 256 && EltWidth == 32)
2710  IID = Intrinsic::x86_fma_vfmadd_ps_256;
2711  else if (VecWidth == 256 && EltWidth == 64)
2712  IID = Intrinsic::x86_fma_vfmadd_pd_256;
2713  else
2714  llvm_unreachable("Unexpected intrinsic");
2715  } else {
2716  // Scalar FNMADD/FNMSUB
2717  Arg1 = Builder.CreateFNeg(Arg1); // Arg0 is passthru so invert Arg1.
2718  if (EltWidth == 32)
2719  IID = Intrinsic::x86_fma_vfmadd_ss;
2720  else if (EltWidth == 64)
2721  IID = Intrinsic::x86_fma_vfmadd_sd;
2722  else
2723  llvm_unreachable("Unexpected intrinsic");
2724  }
2725  // Invert for FNMSUB.
2726  if (Name[8] == 's')
2727  Arg2 = Builder.CreateFNeg(Arg2);
2728  Value *Ops[] = { Arg0, Arg1, Arg2 };
2729  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2730  Ops);
2731  } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
2732  Name.startswith("avx512.maskz.pternlog."))) {
2733  bool ZeroMask = Name[11] == 'z';
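 // Name[11] is 'z' for the "avx512.maskz." prefix and '.' for "avx512.mask.".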
2734  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2735  unsigned EltWidth = CI->getType()->getScalarSizeInBits();
2736  Intrinsic::ID IID;
2737  if (VecWidth == 128 && EltWidth == 32)
2738  IID = Intrinsic::x86_avx512_pternlog_d_128;
2739  else if (VecWidth == 256 && EltWidth == 32)
2740  IID = Intrinsic::x86_avx512_pternlog_d_256;
2741  else if (VecWidth == 512 && EltWidth == 32)
2742  IID = Intrinsic::x86_avx512_pternlog_d_512;
2743  else if (VecWidth == 128 && EltWidth == 64)
2744  IID = Intrinsic::x86_avx512_pternlog_q_128;
2745  else if (VecWidth == 256 && EltWidth == 64)
2746  IID = Intrinsic::x86_avx512_pternlog_q_256;
2747  else if (VecWidth == 512 && EltWidth == 64)
2748  IID = Intrinsic::x86_avx512_pternlog_q_512;
2749  else
2750  llvm_unreachable("Unexpected intrinsic");
2751 
2752  Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
2753  CI->getArgOperand(2), CI->getArgOperand(3) };
2754  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2755  Args);
2756  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
2757  : CI->getArgOperand(0);
2758  Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
2759  } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
2760  Name.startswith("avx512.maskz.vpmadd52"))) {
2761  bool ZeroMask = Name[11] == 'z';
2762  bool High = Name[20] == 'h' || Name[21] == 'h';
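 // The 'h'/'l' marker sits at index 20 for "avx512.mask.vpmadd52*" names and
 // at index 21 for "avx512.maskz.vpmadd52*" names.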
2763  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2764  Intrinsic::ID IID;
2765  if (VecWidth == 128 && !High)
2766  IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
2767  else if (VecWidth == 256 && !High)
2768  IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
2769  else if (VecWidth == 512 && !High)
2770  IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
2771  else if (VecWidth == 128 && High)
2772  IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
2773  else if (VecWidth == 256 && High)
2774  IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
2775  else if (VecWidth == 512 && High)
2776  IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
2777  else
2778  llvm_unreachable("Unexpected intrinsic");
2779 
2780  Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
2781  CI->getArgOperand(2) };
2782  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2783  Args);
2784  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
2785  : CI->getArgOperand(0);
2786  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
2787  } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
2788  Name.startswith("avx512.mask.vpermt2var.") ||
2789  Name.startswith("avx512.maskz.vpermt2var."))) {
2790  bool ZeroMask = Name[11] == 'z';
2791  bool IndexForm = Name[17] == 'i';
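 // Index 17 is 'i' only for "avx512.mask.vpermi2var."; the maskz form exists
 // only for vpermt2var, so it never selects the index form.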
2792  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2793  unsigned EltWidth = CI->getType()->getScalarSizeInBits();
2794  bool IsFloat = CI->getType()->isFPOrFPVectorTy();
2795  Intrinsic::ID IID;
2796  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
2797  IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
2798  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
2799  IID = Intrinsic::x86_avx512_vpermi2var_d_128;
2800  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
2801  IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
2802  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
2803  IID = Intrinsic::x86_avx512_vpermi2var_q_128;
2804  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
2805  IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
2806  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
2807  IID = Intrinsic::x86_avx512_vpermi2var_d_256;
2808  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
2809  IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
2810  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
2811  IID = Intrinsic::x86_avx512_vpermi2var_q_256;
2812  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
2813  IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
2814  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
2815  IID = Intrinsic::x86_avx512_vpermi2var_d_512;
2816  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
2817  IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
2818  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
2819  IID = Intrinsic::x86_avx512_vpermi2var_q_512;
2820  else if (VecWidth == 128 && EltWidth == 16)
2821  IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
2822  else if (VecWidth == 256 && EltWidth == 16)
2823  IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
2824  else if (VecWidth == 512 && EltWidth == 16)
2825  IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
2826  else if (VecWidth == 128 && EltWidth == 8)
2827  IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
2828  else if (VecWidth == 256 && EltWidth == 8)
2829  IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
2830  else if (VecWidth == 512 && EltWidth == 8)
2831  IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
2832  else
2833  llvm_unreachable("Unexpected intrinsic");
2834 
2835  Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
2836  CI->getArgOperand(2) };
2837 
2838  // If this isn't index form we need to swap operand 0 and 1.
2839  if (!IndexForm)
2840  std::swap(Args[0], Args[1]);
2841 
2842  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2843  Args);
2844  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
2845  : Builder.CreateBitCast(CI->getArgOperand(1),
2846  CI->getType());
2847  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
2848  } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
2849  Name.startswith("avx512.maskz.vpdpbusd.") ||
2850  Name.startswith("avx512.mask.vpdpbusds.") ||
2851  Name.startswith("avx512.maskz.vpdpbusds."))) {
2852  bool ZeroMask = Name[11] == 'z';
2853  bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
2854  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2855  Intrinsic::ID IID;
2856  if (VecWidth == 128 && !IsSaturating)
2857  IID = Intrinsic::x86_avx512_vpdpbusd_128;
2858  else if (VecWidth == 256 && !IsSaturating)
2859  IID = Intrinsic::x86_avx512_vpdpbusd_256;
2860  else if (VecWidth == 512 && !IsSaturating)
2861  IID = Intrinsic::x86_avx512_vpdpbusd_512;
2862  else if (VecWidth == 128 && IsSaturating)
2863  IID = Intrinsic::x86_avx512_vpdpbusds_128;
2864  else if (VecWidth == 256 && IsSaturating)
2865  IID = Intrinsic::x86_avx512_vpdpbusds_256;
2866  else if (VecWidth == 512 && IsSaturating)
2867  IID = Intrinsic::x86_avx512_vpdpbusds_512;
2868  else
2869  llvm_unreachable("Unexpected intrinsic");
2870 
2871  Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
2872  CI->getArgOperand(2) };
2873  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2874  Args);
2875  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
2876  : CI->getArgOperand(0);
2877  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
2878  } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
2879  Name.startswith("avx512.maskz.vpdpwssd.") ||
2880  Name.startswith("avx512.mask.vpdpwssds.") ||
2881  Name.startswith("avx512.maskz.vpdpwssds."))) {
2882  bool ZeroMask = Name[11] == 'z';
2883  bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
2884  unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
2885  Intrinsic::ID IID;
2886  if (VecWidth == 128 && !IsSaturating)
2887  IID = Intrinsic::x86_avx512_vpdpwssd_128;
2888  else if (VecWidth == 256 && !IsSaturating)
2889  IID = Intrinsic::x86_avx512_vpdpwssd_256;
2890  else if (VecWidth == 512 && !IsSaturating)
2891  IID = Intrinsic::x86_avx512_vpdpwssd_512;
2892  else if (VecWidth == 128 && IsSaturating)
2893  IID = Intrinsic::x86_avx512_vpdpwssds_128;
2894  else if (VecWidth == 256 && IsSaturating)
2895  IID = Intrinsic::x86_avx512_vpdpwssds_256;
2896  else if (VecWidth == 512 && IsSaturating)
2897  IID = Intrinsic::x86_avx512_vpdpwssds_512;
2898  else
2899  llvm_unreachable("Unexpected intrinsic");
2900 
2901  Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
2902  CI->getArgOperand(2) };
2903  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
2904  Args);
2905  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
2906  : CI->getArgOperand(0);
2907  Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
2908  } else if (IsX86 && Name.startswith("avx512.mask.") &&
2909  upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
2910  // Rep will be updated by the call in the condition.
2911  } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
2912  Value *Arg = CI->getArgOperand(0);
2913  Value *Neg = Builder.CreateNeg(Arg, "neg");
2914  Value *Cmp = Builder.CreateICmpSGE(
2915  Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
2916  Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
2917  } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
2918  Name == "max.ui" || Name == "max.ull")) {
2919  Value *Arg0 = CI->getArgOperand(0);
2920  Value *Arg1 = CI->getArgOperand(1);
2921  Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
2922  ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
2923  : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
2924  Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
2925  } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
2926  Name == "min.ui" || Name == "min.ull")) {
2927  Value *Arg0 = CI->getArgOperand(0);
2928  Value *Arg1 = CI->getArgOperand(1);
2929  Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
2930  ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
2931  : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
2932  Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
2933  } else if (IsNVVM && Name == "clz.ll") {
2934  // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
2935  Value *Arg = CI->getArgOperand(0);
2936  Value *Ctlz = Builder.CreateCall(
2937  Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
2938  {Arg->getType()}),
2939  {Arg, Builder.getFalse()}, "ctlz");
2940  Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
2941  } else if (IsNVVM && Name == "popc.ll") {
2942  // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
2943  // i64.
2944  Value *Arg = CI->getArgOperand(0);
2945  Value *Popc = Builder.CreateCall(
2946  Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
2947  {Arg->getType()}),
2948  Arg, "ctpop");
2949  Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
2950  } else if (IsNVVM && Name == "h2f") {
2951  Rep = Builder.CreateCall(Intrinsic::getDeclaration(
2952  F->getParent(), Intrinsic::convert_from_fp16,
2953  {Builder.getFloatTy()}),
2954  CI->getArgOperand(0), "h2f");
2955  } else {
2956  llvm_unreachable("Unknown function for CallInst upgrade.");
2957  }
2958 
2959  if (Rep)
2960  CI->replaceAllUsesWith(Rep);
2961  CI->eraseFromParent();
2962  return;
2963  }
2964 
2965  const auto &DefaultCase = [&NewFn, &CI]() -> void {
2966  // Handle generic mangling change, but nothing else
2967  assert(
2968  (CI->getCalledFunction()->getName() != NewFn->getName()) &&
2969  "Unknown function for CallInst upgrade and isn't just a name change");
2970  CI->setCalledFunction(NewFn);
2971  };
2972  CallInst *NewCall = nullptr;
2973  switch (NewFn->getIntrinsicID()) {
2974  default: {
2975  DefaultCase();
2976  return;
2977  }
2978 
2979  case Intrinsic::arm_neon_vld1:
2980  case Intrinsic::arm_neon_vld2:
2981  case Intrinsic::arm_neon_vld3:
2982  case Intrinsic::arm_neon_vld4:
2983  case Intrinsic::arm_neon_vld2lane:
2984  case Intrinsic::arm_neon_vld3lane:
2985  case Intrinsic::arm_neon_vld4lane:
2986  case Intrinsic::arm_neon_vst1:
2987  case Intrinsic::arm_neon_vst2:
2988  case Intrinsic::arm_neon_vst3:
2989  case Intrinsic::arm_neon_vst4:
2990  case Intrinsic::arm_neon_vst2lane:
2991  case Intrinsic::arm_neon_vst3lane:
2992  case Intrinsic::arm_neon_vst4lane: {
2993  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
2994  CI->arg_operands().end());
2995  NewCall = Builder.CreateCall(NewFn, Args);
2996  break;
2997  }
2998 
2999  case Intrinsic::bitreverse:
3000  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
3001  break;
3002 
3003  case Intrinsic::ctlz:
3004  case Intrinsic::cttz:
3005  assert(CI->getNumArgOperands() == 1 &&
3006  "Mismatch between function args and call args");
3007  NewCall =
3008  Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
3009  break;
3010 
3011  case Intrinsic::objectsize: {
3012  Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
3013  ? Builder.getFalse()
3014  : CI->getArgOperand(2);
3015  NewCall = Builder.CreateCall(
3016  NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize});
3017  break;
3018  }
3019 
3020  case Intrinsic::ctpop:
3021  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
3022  break;
3023 
3024  case Intrinsic::convert_from_fp16:
3025  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
3026  break;
3027 
3028  case Intrinsic::dbg_value:
3029  // Upgrade from the old version that had an extra offset argument.
3030  assert(CI->getNumArgOperands() == 4);
3031  // Drop nonzero offsets instead of attempting to upgrade them.
3032  if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
3033  if (Offset->isZeroValue()) {
3034  NewCall = Builder.CreateCall(
3035  NewFn,
3036  {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
3037  break;
3038  }
3039  CI->eraseFromParent();
3040  return;
3041 
3042  case Intrinsic::x86_xop_vfrcz_ss:
3043  case Intrinsic::x86_xop_vfrcz_sd:
3044  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
3045  break;
3046 
3047  case Intrinsic::x86_xop_vpermil2pd:
3048  case Intrinsic::x86_xop_vpermil2ps:
3049  case Intrinsic::x86_xop_vpermil2pd_256:
3050  case Intrinsic::x86_xop_vpermil2ps_256: {
3051  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
3052  CI->arg_operands().end());
3053  VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
3054  VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
3055  Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
3056  NewCall = Builder.CreateCall(NewFn, Args);
3057  break;
3058  }
3059 
3060  case Intrinsic::x86_sse41_ptestc:
3061  case Intrinsic::x86_sse41_ptestz:
3062  case Intrinsic::x86_sse41_ptestnzc: {
3063  // The arguments for these intrinsics used to be v4f32, and changed
3064  // to v2i64. This is purely a nop, since those are bitwise intrinsics.
3065  // So, the only thing required is a bitcast for both arguments.
3066  // First, check the arguments have the old type.
3067  Value *Arg0 = CI->getArgOperand(0);
3068  if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
3069  return;
3070 
3071  // Old intrinsic, add bitcasts
3072  Value *Arg1 = CI->getArgOperand(1);
3073 
3074  Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
3075 
3076  Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
3077  Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
3078 
3079  NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
3080  break;
3081  }
3082 
3083  case Intrinsic::x86_sse41_insertps:
3084  case Intrinsic::x86_sse41_dppd:
3085  case Intrinsic::x86_sse41_dpps:
3086  case Intrinsic::x86_sse41_mpsadbw:
3087  case Intrinsic::x86_avx_dp_ps_256:
3088  case Intrinsic::x86_avx2_mpsadbw: {
3089  // Need to truncate the last argument from i32 to i8 -- this argument models
3090  // an inherently 8-bit immediate operand to these x86 instructions.
3091  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
3092  CI->arg_operands().end());
3093 
3094  // Replace the last argument with a trunc.
3095  Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
3096  NewCall = Builder.CreateCall(NewFn, Args);
3097  break;
3098  }
3099 
3100  case Intrinsic::x86_avx512_mask_cmp_pd_128:
3101  case Intrinsic::x86_avx512_mask_cmp_pd_256:
3102  case Intrinsic::x86_avx512_mask_cmp_pd_512:
3103  case Intrinsic::x86_avx512_mask_cmp_ps_128:
3104  case Intrinsic::x86_avx512_mask_cmp_ps_256:
3105  case Intrinsic::x86_avx512_mask_cmp_ps_512: {
3106  SmallVector<Value *, 4> Args;
3107  Args.push_back(CI->getArgOperand(0));
3108  Args.push_back(CI->getArgOperand(1));
3109  Args.push_back(CI->getArgOperand(2));
3110  if (CI->getNumArgOperands() == 5)
3111  Args.push_back(CI->getArgOperand(4));
3112 
3113  NewCall = Builder.CreateCall(NewFn, Args);
3114  unsigned NumElts = Args[0]->getType()->getVectorNumElements();
3115  Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, CI->getArgOperand(3),
3116  NumElts);
3117 
3118  std::string Name = CI->getName();
3119  if (!Name.empty()) {
3120  CI->setName(Name + ".old");
3121  NewCall->setName(Name);
3122  }
3123  CI->replaceAllUsesWith(Res);
3124  CI->eraseFromParent();
3125  return;
3126  }
3127 
3128  case Intrinsic::thread_pointer: {
3129  NewCall = Builder.CreateCall(NewFn, {});
3130  break;
3131  }
3132 
3133  case Intrinsic::invariant_start:
3134  case Intrinsic::invariant_end:
3135  case Intrinsic::masked_load:
3136  case Intrinsic::masked_store:
3137  case Intrinsic::masked_gather:
3138  case Intrinsic::masked_scatter: {
3139  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
3140  CI->arg_operands().end());
3141  NewCall = Builder.CreateCall(NewFn, Args);
3142  break;
3143  }
3144 
3145  case Intrinsic::memcpy:
3146  case Intrinsic::memmove:
3147  case Intrinsic::memset: {
3148  // We have to make sure that the call signature is what we're expecting.
3149  // We only want to change the old signatures by removing the alignment arg:
3150  // @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
3151  // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
3152  // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
3153  // -> @llvm.memset...(i8*, i8, i[32|64], i1)
3154  // Note: i8*'s in the above can be any pointer type
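 // Illustrative upgrade of a typical call (operand names are placeholders):
 //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 4, i1 false)
 //   -> call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 %n, i1 false)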
3155  if (CI->getNumArgOperands() != 5) {
3156  DefaultCase();
3157  return;
3158  }
3159  // Remove alignment argument (3), and add alignment attributes to the
3160  // dest/src pointers.
3161  Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
3162  CI->getArgOperand(2), CI->getArgOperand(4)};
3163  NewCall = Builder.CreateCall(NewFn, Args);
3164  auto *MemCI = cast<MemIntrinsic>(NewCall);
3165  // All mem intrinsics support dest alignment.
3166  const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
3167  MemCI->setDestAlignment(Align->getZExtValue());
3168  // Memcpy/Memmove also support source alignment.
3169  if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
3170  MTI->setSourceAlignment(Align->getZExtValue());
3171  break;
3172  }
3173  }
3174  assert(NewCall && "Should have either set this variable or returned through "
3175  "the default case");
3176  std::string Name = CI->getName();
3177  if (!Name.empty()) {
3178  CI->setName(Name + ".old");
3179  NewCall->setName(Name);
3180  }
3181  CI->replaceAllUsesWith(NewCall);
3182  CI->eraseFromParent();
3183 }
3184 
3185 void llvm::UpgradeCallsToIntrinsic(Function *F) {
3186  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
3187 
3188  // Check if this function should be upgraded and get the replacement function
3189  // if there is one.
3190  Function *NewFn;
3191  if (UpgradeIntrinsicFunction(F, NewFn)) {
3192  // Replace all users of the old function with the new function or new
3193  // instructions. This is not a range loop because the call is deleted.
3194  for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
3195  if (CallInst *CI = dyn_cast<CallInst>(*UI++))
3196  UpgradeIntrinsicCall(CI, NewFn);
3197 
3198  // Remove old function, no longer used, from the module.
3199  F->eraseFromParent();
3200  }
3201 }
3202 
3203 MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
3204  // Check if the tag uses struct-path aware TBAA format.
3205  if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
3206  return &MD;
3207 
3208  auto &Context = MD.getContext();
3209  if (MD.getNumOperands() == 3) {
3210  Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
3211  MDNode *ScalarType = MDNode::get(Context, Elts);
3212  // Create a MDNode <ScalarType, ScalarType, offset 0, const>
3213  Metadata *Elts2[] = {ScalarType, ScalarType,
3214  ConstantAsMetadata::get(
3215  Constant::getNullValue(Type::getInt64Ty(Context))),
3216  MD.getOperand(2)};
3217  return MDNode::get(Context, Elts2);
3218  }
3219  // Create a MDNode <MD, MD, offset 0>
3220  Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
3221  Type::getInt64Ty(Context)))};
3222  return MDNode::get(Context, Elts);
3223 }
3224 
3225 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
3226  Instruction *&Temp) {
3227  if (Opc != Instruction::BitCast)
3228  return nullptr;
3229 
3230  Temp = nullptr;
3231  Type *SrcTy = V->getType();
3232  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
3233  SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
3234  LLVMContext &Context = V->getContext();
3235 
3236  // We have no information about target data layout, so we assume that
3237  // the maximum pointer size is 64bit.
3238  Type *MidTy = Type::getInt64Ty(Context);
3239  Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
3240 
3241  return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
3242  }
3243 
3244  return nullptr;
3245 }
3246 
3247 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
3248  if (Opc != Instruction::BitCast)
3249  return nullptr;
3250 
3251  Type *SrcTy = C->getType();
3252  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
3253  SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
3254  LLVMContext &Context = C->getContext();
3255 
3256  // We have no information about target data layout, so we assume that
3257  // the maximum pointer size is 64bit.
3258  Type *MidTy = Type::getInt64Ty(Context);
3259 
3259 
3260  return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
3261  DestTy);
3262  }
3263 
3264  return nullptr;
3265 }
3266 
3267 /// Check the debug info version number, if it is out-dated, drop the debug
3268 /// info. Return true if module is modified.
3269 bool llvm::UpgradeDebugInfo(Module &M) {
3270  unsigned Version = getDebugMetadataVersionFromModule(M);
3271  if (Version == DEBUG_METADATA_VERSION) {
3272  bool BrokenDebugInfo = false;
3273  if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
3274  report_fatal_error("Broken module found, compilation aborted!");
3275  if (!BrokenDebugInfo)
3276  // Everything is ok.
3277  return false;
3278  else {
3279  // Diagnose malformed debug info.
3280  DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
3281  M.getContext().diagnose(Diag);
3282  }
3283  }
3284  bool Modified = StripDebugInfo(M);
3285  if (Modified && Version != DEBUG_METADATA_VERSION) {
3286  // Diagnose a version mismatch.
3287  DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
3288  M.getContext().diagnose(DiagVersion);
3289  }
3290  return Modified;
3291 }
3292 
3293 bool llvm::UpgradeRetainReleaseMarker(Module &M) {
3294  bool Changed = false;
3295  NamedMDNode *ModRetainReleaseMarker =
3296  M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker");
3297  if (ModRetainReleaseMarker) {
3298  MDNode *Op = ModRetainReleaseMarker->getOperand(0);
3299  if (Op) {
3300  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
3301  if (ID) {
3302  SmallVector<StringRef, 4> ValueComp;
3303  ID->getString().split(ValueComp, "#");
3304  if (ValueComp.size() == 2) {
3305  std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
3306  Metadata *Ops[1] = {MDString::get(M.getContext(), NewValue)};
3307  ModRetainReleaseMarker->setOperand(0,
3308  MDNode::get(M.getContext(), Ops));
3309  Changed = true;
3310  }
3311  }
3312  }
3313  }
3314  return Changed;
3315 }
3316 
3317 bool llvm::UpgradeModuleFlags(Module &M) {
3318  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
3319  if (!ModFlags)
3320  return false;
3321 
3322  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
3323  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
3324  MDNode *Op = ModFlags->getOperand(I);
3325  if (Op->getNumOperands() != 3)
3326  continue;
3327  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
3328  if (!ID)
3329  continue;
3330  if (ID->getString() == "Objective-C Image Info Version")
3331  HasObjCFlag = true;
3332  if (ID->getString() == "Objective-C Class Properties")
3333  HasClassProperties = true;
3334  // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
3335  // flags was Error and is now Max.
3336  if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
3337  if (auto *Behavior =
3338  mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
3339  if (Behavior->getLimitedValue() == Module::Error) {
3340  Type *Int32Ty = Type::getInt32Ty(M.getContext());
3341  Metadata *Ops[3] = {
3342  ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
3343  MDString::get(M.getContext(), ID->getString()),
3344  Op->getOperand(2)};
3345  ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
3346  Changed = true;
3347  }
3348  }
3349  }
3350  // Upgrade Objective-C Image Info Section. Remove the whitespace in the
3351  // section name so that llvm-lto will not complain about mismatching
3352  // module flags that are functionally the same.
3353  if (ID->getString() == "Objective-C Image Info Section") {
3354  if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
3355  SmallVector<StringRef, 4> ValueComp;
3356  Value->getString().split(ValueComp, " ");
3357  if (ValueComp.size() != 1) {
3358  std::string NewValue;
3359  for (auto &S : ValueComp)
3360  NewValue += S.str();
3361  Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
3362  MDString::get(M.getContext(), NewValue)};
3363  ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
3364  Changed = true;
3365  }
3366  }
3367  }
3368  }
3369 
3370  // "Objective-C Class Properties" was recently added for Objective-C. We
3371  // upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module
3372  // flag of value 0, so we can correctly downgrade this flag when trying to
3373  // link an ObjC bitcode without this module flag with an ObjC bitcode with
3374  // this module flag.
3375  if (HasObjCFlag && !HasClassProperties) {
3376  M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
3377  (uint32_t)0);
3378  Changed = true;
3379  }
3380 
3381  return Changed;
3382 }
3383 
3384 void llvm::UpgradeSectionAttributes(Module &M) {
3385  auto TrimSpaces = [](StringRef Section) -> std::string {
3386  SmallVector<StringRef, 5> Components;
3387  Section.split(Components, ',');
3388 
3389  SmallString<32> Buffer;
3390  raw_svector_ostream OS(Buffer);
3391 
3392  for (auto Component : Components)
3393  OS << ',' << Component.trim();
3394 
3395  return OS.str().substr(1);
3396  };
3397 
3398  for (auto &GV : M.globals()) {
3399  if (!GV.hasSection())
3400  continue;
3401 
3402  StringRef Section = GV.getSection();
3403 
3404  if (!Section.startswith("__DATA, __objc_catlist"))
3405  continue;
3406 
3407  // __DATA, __objc_catlist, regular, no_dead_strip
3408  // __DATA,__objc_catlist,regular,no_dead_strip
3409  GV.setSection(TrimSpaces(Section));
3410  }
3411 }
3412 
3413 static bool isOldLoopArgument(Metadata *MD) {
3414  auto *T = dyn_cast_or_null<MDTuple>(MD);
3415  if (!T)
3416  return false;
3417  if (T->getNumOperands() < 1)
3418  return false;
3419  auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
3420  if (!S)
3421  return false;
3422  return S->getString().startswith("llvm.vectorizer.");
3423 }
3424 
3425 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
3426  StringRef OldPrefix = "llvm.vectorizer.";
3427  assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
3428 
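 // e.g. "llvm.vectorizer.width"  -> "llvm.loop.vectorize.width"
 //      "llvm.vectorizer.unroll" -> "llvm.loop.interleave.count"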
3429  if (OldTag == "llvm.vectorizer.unroll")
3430  return MDString::get(C, "llvm.loop.interleave.count");
3431 
3432  return MDString::get(
3433  C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
3434  .str());
3435 }
3436 
3437 static Metadata *upgradeLoopArgument(Metadata *MD) {
3438  auto *T = dyn_cast_or_null<MDTuple>(MD);
3439  if (!T)
3440  return MD;
3441  if (T->getNumOperands() < 1)
3442  return MD;
3443  auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
3444  if (!OldTag)
3445  return MD;
3446  if (!OldTag->getString().startswith("llvm.vectorizer."))
3447  return MD;
3448 
3449  // This has an old tag. Upgrade it.
3450  SmallVector<Metadata *, 8> Ops;
3451  Ops.reserve(T->getNumOperands());
3452  Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
3453  for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
3454  Ops.push_back(T->getOperand(I));
3455 
3456  return MDTuple::get(T->getContext(), Ops);
3457 }
3458 
3459 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
3460  auto *T = dyn_cast<MDTuple>(&N);
3461  if (!T)
3462  return &N;
3463 
3464  if (none_of(T->operands(), isOldLoopArgument))
3465  return &N;
3466 
3467  SmallVector<Metadata *, 8> Ops;
3468  Ops.reserve(T->getNumOperands());
3469  for (Metadata *MD : T->operands())
3470  Ops.push_back(upgradeLoopArgument(MD));
3471 
3472  return MDTuple::get(T->getContext(), Ops);
3473 }