//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>

using namespace llvm;

static cl::opt<bool>
    DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info",
                                cl::desc("Disable autoupgrade of debug info"));

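// Note: upgraded intrinsics are moved aside under a ".old" suffix so that a
// declaration with the original name can be recreated; calls to the renamed
// function are rewritten against the new declaration in UpgradeIntrinsicCall.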
static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}
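
// For example, a declaration of llvm.x86.sse41.ptestc that still takes
// <4 x float> operands is renamed to "llvm.x86.sse41.ptestc.old" here, while
// NewFn becomes the current declaration operating on <2 x i64>.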

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declaration of fp compare intrinsics that change return type
// from scalar to vXi1 mask.
static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
                                      Function *&NewFn) {
  // Check if the return type is a vector.
  if (F->getReturnType()->isVectorTy())
    return false;

  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
                                    Function *&NewFn) {
  if (F->getReturnType()->getScalarType()->isBFloatTy())
    return false;

  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
                                      Function *&NewFn) {
  if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
    return false;

  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
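  // A match below only answers "does this intrinsic need upgrading?": the
  // caller reports NewFn == nullptr, and the call sites themselves are
  // rewritten later in UpgradeIntrinsicCall.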
  if (Name.consume_front("avx."))
    return (Name.starts_with("blend.p") || // Added in 3.7
            Name == "cvt.ps2.pd.256" || // Added in 3.9
            Name == "cvtdq2.pd.256" || // Added in 3.9
            Name == "cvtdq2.ps.256" || // Added in 7.0
            Name.starts_with("movnt.") || // Added in 3.2
            Name.starts_with("sqrt.p") || // Added in 7.0
            Name.starts_with("storeu.") || // Added in 3.9
            Name.starts_with("vbroadcast.s") || // Added in 3.5
            Name.starts_with("vbroadcastf128") || // Added in 4.0
            Name.starts_with("vextractf128.") || // Added in 3.7
            Name.starts_with("vinsertf128.") || // Added in 3.7
            Name.starts_with("vperm2f128.") || // Added in 6.0
            Name.starts_with("vpermil.")); // Added in 3.1

  if (Name.consume_front("avx2."))
    return (Name == "movntdqa" || // Added in 5.0
            Name.starts_with("pabs.") || // Added in 6.0
            Name.starts_with("padds.") || // Added in 8.0
            Name.starts_with("paddus.") || // Added in 8.0
            Name.starts_with("pblendd.") || // Added in 3.7
            Name == "pblendw" || // Added in 3.7
            Name.starts_with("pbroadcast") || // Added in 3.8
            Name.starts_with("pcmpeq.") || // Added in 3.1
            Name.starts_with("pcmpgt.") || // Added in 3.1
            Name.starts_with("pmax") || // Added in 3.9
            Name.starts_with("pmin") || // Added in 3.9
            Name.starts_with("pmovsx") || // Added in 3.9
            Name.starts_with("pmovzx") || // Added in 3.9
            Name == "pmul.dq" || // Added in 7.0
            Name == "pmulu.dq" || // Added in 7.0
            Name.starts_with("psll.dq") || // Added in 3.7
            Name.starts_with("psrl.dq") || // Added in 3.7
            Name.starts_with("psubs.") || // Added in 8.0
            Name.starts_with("psubus.") || // Added in 8.0
            Name.starts_with("vbroadcast") || // Added in 3.8
            Name == "vbroadcasti128" || // Added in 3.7
            Name == "vextracti128" || // Added in 3.7
            Name == "vinserti128" || // Added in 3.7
            Name == "vperm2i128"); // Added in 6.0

  if (Name.consume_front("avx512.")) {
    if (Name.consume_front("mask."))
      // 'avx512.mask.*'
      return (Name.starts_with("add.p") || // Added in 7.0. 128/256 in 4.0
              Name.starts_with("and.") || // Added in 3.9
              Name.starts_with("andn.") || // Added in 3.9
              Name.starts_with("broadcast.s") || // Added in 3.9
              Name.starts_with("broadcastf32x4.") || // Added in 6.0
              Name.starts_with("broadcastf32x8.") || // Added in 6.0
              Name.starts_with("broadcastf64x2.") || // Added in 6.0
              Name.starts_with("broadcastf64x4.") || // Added in 6.0
              Name.starts_with("broadcasti32x4.") || // Added in 6.0
              Name.starts_with("broadcasti32x8.") || // Added in 6.0
              Name.starts_with("broadcasti64x2.") || // Added in 6.0
              Name.starts_with("broadcasti64x4.") || // Added in 6.0
              Name.starts_with("cmp.b") || // Added in 5.0
              Name.starts_with("cmp.d") || // Added in 5.0
              Name.starts_with("cmp.q") || // Added in 5.0
              Name.starts_with("cmp.w") || // Added in 5.0
              Name.starts_with("compress.b") || // Added in 9.0
              Name.starts_with("compress.d") || // Added in 9.0
              Name.starts_with("compress.p") || // Added in 9.0
              Name.starts_with("compress.q") || // Added in 9.0
              Name.starts_with("compress.store.") || // Added in 7.0
              Name.starts_with("compress.w") || // Added in 9.0
              Name.starts_with("conflict.") || // Added in 9.0
              Name.starts_with("cvtdq2pd.") || // Added in 4.0
              Name.starts_with("cvtdq2ps.") || // Added in 7.0 updated 9.0
              Name == "cvtpd2dq.256" || // Added in 7.0
              Name == "cvtpd2ps.256" || // Added in 7.0
              Name == "cvtps2pd.128" || // Added in 7.0
              Name == "cvtps2pd.256" || // Added in 7.0
              Name.starts_with("cvtqq2pd.") || // Added in 7.0 updated 9.0
              Name == "cvtqq2ps.256" || // Added in 9.0
              Name == "cvtqq2ps.512" || // Added in 9.0
              Name == "cvttpd2dq.256" || // Added in 7.0
              Name == "cvttps2dq.128" || // Added in 7.0
              Name == "cvttps2dq.256" || // Added in 7.0
              Name.starts_with("cvtudq2pd.") || // Added in 4.0
              Name.starts_with("cvtudq2ps.") || // Added in 7.0 updated 9.0
              Name.starts_with("cvtuqq2pd.") || // Added in 7.0 updated 9.0
              Name == "cvtuqq2ps.256" || // Added in 9.0
              Name == "cvtuqq2ps.512" || // Added in 9.0
              Name.starts_with("dbpsadbw.") || // Added in 7.0
              Name.starts_with("div.p") || // Added in 7.0. 128/256 in 4.0
              Name.starts_with("expand.b") || // Added in 9.0
              Name.starts_with("expand.d") || // Added in 9.0
              Name.starts_with("expand.load.") || // Added in 7.0
              Name.starts_with("expand.p") || // Added in 9.0
              Name.starts_with("expand.q") || // Added in 9.0
              Name.starts_with("expand.w") || // Added in 9.0
              Name.starts_with("fpclass.p") || // Added in 7.0
              Name.starts_with("insert") || // Added in 4.0
              Name.starts_with("load.") || // Added in 3.9
              Name.starts_with("loadu.") || // Added in 3.9
              Name.starts_with("lzcnt.") || // Added in 5.0
              Name.starts_with("max.p") || // Added in 7.0. 128/256 in 5.0
              Name.starts_with("min.p") || // Added in 7.0. 128/256 in 5.0
              Name.starts_with("movddup") || // Added in 3.9
              Name.starts_with("move.s") || // Added in 4.0
              Name.starts_with("movshdup") || // Added in 3.9
              Name.starts_with("movsldup") || // Added in 3.9
              Name.starts_with("mul.p") || // Added in 7.0. 128/256 in 4.0
              Name.starts_with("or.") || // Added in 3.9
              Name.starts_with("pabs.") || // Added in 6.0
              Name.starts_with("packssdw.") || // Added in 5.0
              Name.starts_with("packsswb.") || // Added in 5.0
              Name.starts_with("packusdw.") || // Added in 5.0
              Name.starts_with("packuswb.") || // Added in 5.0
              Name.starts_with("padd.") || // Added in 4.0
              Name.starts_with("padds.") || // Added in 8.0
              Name.starts_with("paddus.") || // Added in 8.0
              Name.starts_with("palignr.") || // Added in 3.9
              Name.starts_with("pand.") || // Added in 3.9
              Name.starts_with("pandn.") || // Added in 3.9
              Name.starts_with("pavg") || // Added in 6.0
              Name.starts_with("pbroadcast") || // Added in 6.0
              Name.starts_with("pcmpeq.") || // Added in 3.9
              Name.starts_with("pcmpgt.") || // Added in 3.9
              Name.starts_with("perm.df.") || // Added in 3.9
              Name.starts_with("perm.di.") || // Added in 3.9
              Name.starts_with("permvar.") || // Added in 7.0
              Name.starts_with("pmaddubs.w.") || // Added in 7.0
              Name.starts_with("pmaddw.d.") || // Added in 7.0
              Name.starts_with("pmax") || // Added in 4.0
              Name.starts_with("pmin") || // Added in 4.0
              Name == "pmov.qd.256" || // Added in 9.0
              Name == "pmov.qd.512" || // Added in 9.0
              Name == "pmov.wb.256" || // Added in 9.0
              Name == "pmov.wb.512" || // Added in 9.0
              Name.starts_with("pmovsx") || // Added in 4.0
              Name.starts_with("pmovzx") || // Added in 4.0
              Name.starts_with("pmul.dq.") || // Added in 4.0
              Name.starts_with("pmul.hr.sw.") || // Added in 7.0
              Name.starts_with("pmulh.w.") || // Added in 7.0
              Name.starts_with("pmulhu.w.") || // Added in 7.0
              Name.starts_with("pmull.") || // Added in 4.0
              Name.starts_with("pmultishift.qb.") || // Added in 8.0
              Name.starts_with("pmulu.dq.") || // Added in 4.0
              Name.starts_with("por.") || // Added in 3.9
              Name.starts_with("prol.") || // Added in 8.0
              Name.starts_with("prolv.") || // Added in 8.0
              Name.starts_with("pror.") || // Added in 8.0
              Name.starts_with("prorv.") || // Added in 8.0
              Name.starts_with("pshuf.b.") || // Added in 4.0
              Name.starts_with("pshuf.d.") || // Added in 3.9
              Name.starts_with("pshufh.w.") || // Added in 3.9
              Name.starts_with("pshufl.w.") || // Added in 3.9
              Name.starts_with("psll.d") || // Added in 4.0
              Name.starts_with("psll.q") || // Added in 4.0
              Name.starts_with("psll.w") || // Added in 4.0
              Name.starts_with("pslli") || // Added in 4.0
              Name.starts_with("psllv") || // Added in 4.0
              Name.starts_with("psra.d") || // Added in 4.0
              Name.starts_with("psra.q") || // Added in 4.0
              Name.starts_with("psra.w") || // Added in 4.0
              Name.starts_with("psrai") || // Added in 4.0
              Name.starts_with("psrav") || // Added in 4.0
              Name.starts_with("psrl.d") || // Added in 4.0
              Name.starts_with("psrl.q") || // Added in 4.0
              Name.starts_with("psrl.w") || // Added in 4.0
              Name.starts_with("psrli") || // Added in 4.0
              Name.starts_with("psrlv") || // Added in 4.0
              Name.starts_with("psub.") || // Added in 4.0
              Name.starts_with("psubs.") || // Added in 8.0
              Name.starts_with("psubus.") || // Added in 8.0
              Name.starts_with("pternlog.") || // Added in 7.0
              Name.starts_with("punpckh") || // Added in 3.9
              Name.starts_with("punpckl") || // Added in 3.9
              Name.starts_with("pxor.") || // Added in 3.9
              Name.starts_with("shuf.f") || // Added in 6.0
              Name.starts_with("shuf.i") || // Added in 6.0
              Name.starts_with("shuf.p") || // Added in 4.0
              Name.starts_with("sqrt.p") || // Added in 7.0
              Name.starts_with("store.b.") || // Added in 3.9
              Name.starts_with("store.d.") || // Added in 3.9
              Name.starts_with("store.p") || // Added in 3.9
              Name.starts_with("store.q.") || // Added in 3.9
              Name.starts_with("store.w.") || // Added in 3.9
              Name == "store.ss" || // Added in 7.0
              Name.starts_with("storeu.") || // Added in 3.9
              Name.starts_with("sub.p") || // Added in 7.0. 128/256 in 4.0
              Name.starts_with("ucmp.") || // Added in 5.0
              Name.starts_with("unpckh.") || // Added in 3.9
              Name.starts_with("unpckl.") || // Added in 3.9
              Name.starts_with("valign.") || // Added in 4.0
              Name == "vcvtph2ps.128" || // Added in 11.0
              Name == "vcvtph2ps.256" || // Added in 11.0
              Name.starts_with("vextract") || // Added in 4.0
              Name.starts_with("vfmadd.") || // Added in 7.0
              Name.starts_with("vfmaddsub.") || // Added in 7.0
              Name.starts_with("vfnmadd.") || // Added in 7.0
              Name.starts_with("vfnmsub.") || // Added in 7.0
              Name.starts_with("vpdpbusd.") || // Added in 7.0
              Name.starts_with("vpdpbusds.") || // Added in 7.0
              Name.starts_with("vpdpwssd.") || // Added in 7.0
              Name.starts_with("vpdpwssds.") || // Added in 7.0
              Name.starts_with("vpermi2var.") || // Added in 7.0
              Name.starts_with("vpermil.p") || // Added in 3.9
              Name.starts_with("vpermilvar.") || // Added in 4.0
              Name.starts_with("vpermt2var.") || // Added in 7.0
              Name.starts_with("vpmadd52") || // Added in 7.0
              Name.starts_with("vpshld.") || // Added in 7.0
              Name.starts_with("vpshldv.") || // Added in 8.0
              Name.starts_with("vpshrd.") || // Added in 7.0
              Name.starts_with("vpshrdv.") || // Added in 8.0
              Name.starts_with("vpshufbitqmb.") || // Added in 8.0
              Name.starts_with("xor.")); // Added in 3.9

    if (Name.consume_front("mask3."))
      // 'avx512.mask3.*'
      return (Name.starts_with("vfmadd.") || // Added in 7.0
              Name.starts_with("vfmaddsub.") || // Added in 7.0
              Name.starts_with("vfmsub.") || // Added in 7.0
              Name.starts_with("vfmsubadd.") || // Added in 7.0
              Name.starts_with("vfnmsub.")); // Added in 7.0

    if (Name.consume_front("maskz."))
      // 'avx512.maskz.*'
      return (Name.starts_with("pternlog.") || // Added in 7.0
              Name.starts_with("vfmadd.") || // Added in 7.0
              Name.starts_with("vfmaddsub.") || // Added in 7.0
              Name.starts_with("vpdpbusd.") || // Added in 7.0
              Name.starts_with("vpdpbusds.") || // Added in 7.0
              Name.starts_with("vpdpwssd.") || // Added in 7.0
              Name.starts_with("vpdpwssds.") || // Added in 7.0
              Name.starts_with("vpermt2var.") || // Added in 7.0
              Name.starts_with("vpmadd52") || // Added in 7.0
              Name.starts_with("vpshldv.") || // Added in 8.0
              Name.starts_with("vpshrdv.")); // Added in 8.0

    // 'avx512.*'
    return (Name == "movntdqa" || // Added in 5.0
            Name == "pmul.dq.512" || // Added in 7.0
            Name == "pmulu.dq.512" || // Added in 7.0
            Name.starts_with("broadcastm") || // Added in 6.0
            Name.starts_with("cmp.p") || // Added in 12.0
            Name.starts_with("cvtb2mask.") || // Added in 7.0
            Name.starts_with("cvtd2mask.") || // Added in 7.0
            Name.starts_with("cvtmask2") || // Added in 5.0
            Name.starts_with("cvtq2mask.") || // Added in 7.0
            Name == "cvtusi2sd" || // Added in 7.0
            Name.starts_with("cvtw2mask.") || // Added in 7.0
            Name == "kand.w" || // Added in 7.0
            Name == "kandn.w" || // Added in 7.0
            Name == "knot.w" || // Added in 7.0
            Name == "kor.w" || // Added in 7.0
            Name == "kortestc.w" || // Added in 7.0
            Name == "kortestz.w" || // Added in 7.0
            Name.starts_with("kunpck") || // Added in 6.0
            Name == "kxnor.w" || // Added in 7.0
            Name == "kxor.w" || // Added in 7.0
            Name.starts_with("padds.") || // Added in 8.0
            Name.starts_with("pbroadcast") || // Added in 3.9
            Name.starts_with("prol") || // Added in 8.0
            Name.starts_with("pror") || // Added in 8.0
            Name.starts_with("psll.dq") || // Added in 3.9
            Name.starts_with("psrl.dq") || // Added in 3.9
            Name.starts_with("psubs.") || // Added in 8.0
            Name.starts_with("ptestm") || // Added in 6.0
            Name.starts_with("ptestnm") || // Added in 6.0
            Name.starts_with("storent.") || // Added in 3.9
            Name.starts_with("vbroadcast.s") || // Added in 7.0
            Name.starts_with("vpshld.") || // Added in 8.0
            Name.starts_with("vpshrd.")); // Added in 8.0
  }

  if (Name.consume_front("fma."))
    return (Name.starts_with("vfmadd.") || // Added in 7.0
            Name.starts_with("vfmsub.") || // Added in 7.0
            Name.starts_with("vfmsubadd.") || // Added in 7.0
            Name.starts_with("vfnmadd.") || // Added in 7.0
            Name.starts_with("vfnmsub.")); // Added in 7.0

  if (Name.consume_front("fma4."))
    return Name.starts_with("vfmadd.s"); // Added in 7.0

  if (Name.consume_front("sse."))
    return (Name == "add.ss" || // Added in 4.0
            Name == "cvtsi2ss" || // Added in 7.0
            Name == "cvtsi642ss" || // Added in 7.0
            Name == "div.ss" || // Added in 4.0
            Name == "mul.ss" || // Added in 4.0
            Name.starts_with("sqrt.p") || // Added in 7.0
            Name == "sqrt.ss" || // Added in 7.0
            Name.starts_with("storeu.") || // Added in 3.9
            Name == "sub.ss"); // Added in 4.0

  if (Name.consume_front("sse2."))
    return (Name == "add.sd" || // Added in 4.0
            Name == "cvtdq2pd" || // Added in 3.9
            Name == "cvtdq2ps" || // Added in 7.0
            Name == "cvtps2pd" || // Added in 3.9
            Name == "cvtsi2sd" || // Added in 7.0
            Name == "cvtsi642sd" || // Added in 7.0
            Name == "cvtss2sd" || // Added in 7.0
            Name == "div.sd" || // Added in 4.0
            Name == "mul.sd" || // Added in 4.0
            Name.starts_with("padds.") || // Added in 8.0
            Name.starts_with("paddus.") || // Added in 8.0
            Name.starts_with("pcmpeq.") || // Added in 3.1
            Name.starts_with("pcmpgt.") || // Added in 3.1
            Name == "pmaxs.w" || // Added in 3.9
            Name == "pmaxu.b" || // Added in 3.9
            Name == "pmins.w" || // Added in 3.9
            Name == "pminu.b" || // Added in 3.9
            Name == "pmulu.dq" || // Added in 7.0
            Name.starts_with("pshuf") || // Added in 3.9
            Name.starts_with("psll.dq") || // Added in 3.7
            Name.starts_with("psrl.dq") || // Added in 3.7
            Name.starts_with("psubs.") || // Added in 8.0
            Name.starts_with("psubus.") || // Added in 8.0
            Name.starts_with("sqrt.p") || // Added in 7.0
            Name == "sqrt.sd" || // Added in 7.0
            Name == "storel.dq" || // Added in 3.9
            Name.starts_with("storeu.") || // Added in 3.9
            Name == "sub.sd"); // Added in 4.0

  if (Name.consume_front("sse41."))
    return (Name.starts_with("blendp") || // Added in 3.7
            Name == "movntdqa" || // Added in 5.0
            Name == "pblendw" || // Added in 3.7
            Name == "pmaxsb" || // Added in 3.9
            Name == "pmaxsd" || // Added in 3.9
            Name == "pmaxud" || // Added in 3.9
            Name == "pmaxuw" || // Added in 3.9
            Name == "pminsb" || // Added in 3.9
            Name == "pminsd" || // Added in 3.9
            Name == "pminud" || // Added in 3.9
            Name == "pminuw" || // Added in 3.9
            Name.starts_with("pmovsx") || // Added in 3.8
            Name.starts_with("pmovzx") || // Added in 3.9
            Name == "pmuldq"); // Added in 7.0

  if (Name.consume_front("sse42."))
    return Name == "crc32.64.8"; // Added in 3.4

  if (Name.consume_front("sse4a."))
    return Name.starts_with("movnt."); // Added in 3.9

  if (Name.consume_front("ssse3."))
    return (Name == "pabs.b.128" || // Added in 6.0
            Name == "pabs.d.128" || // Added in 6.0
            Name == "pabs.w.128"); // Added in 6.0

  if (Name.consume_front("xop."))
    return (Name == "vpcmov" || // Added in 3.8
            Name == "vpcmov.256" || // Added in 5.0
            Name.starts_with("vpcom") || // Added in 3.2, Updated in 9.0
            Name.starts_with("vprot")); // Added in 8.0

  return (Name == "addcarry.u32" || // Added in 8.0
          Name == "addcarry.u64" || // Added in 8.0
          Name == "addcarryx.u32" || // Added in 8.0
          Name == "addcarryx.u64" || // Added in 8.0
          Name == "subborrow.u32" || // Added in 8.0
          Name == "subborrow.u64" || // Added in 8.0
          Name.starts_with("vcvtph2ps.")); // Added in 11.0
}

static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
                                        Function *&NewFn) {
  // Only handle intrinsics that start with "x86.".
  if (!Name.consume_front("x86."))
    return false;

  if (shouldUpgradeX86Intrinsic(F, Name)) {
    NewFn = nullptr;
    return true;
  }

  if (Name == "rdtscp") { // Added in 8.0
    // If this intrinsic has 0 operands, it's the new version.
    if (F->getFunctionType()->getNumParams() == 0)
      return false;

    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_rdtscp);
    return true;
  }

  Intrinsic::ID ID;

  // SSE4.1 ptest functions may have an old signature.
  if (Name.consume_front("sse41.ptest")) { // Added in 3.2
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("c", Intrinsic::x86_sse41_ptestc)
             .Case("z", Intrinsic::x86_sse41_ptestz)
             .Case("nzc", Intrinsic::x86_sse41_ptestnzc)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradePTESTIntrinsic(F, ID, NewFn);

    return false;
  }

  // Several blend and other instructions with masks used the wrong number of
  // bits.

  // Added in 3.6
  ID = StringSwitch<Intrinsic::ID>(Name)
           .Case("sse41.insertps", Intrinsic::x86_sse41_insertps)
           .Case("sse41.dppd", Intrinsic::x86_sse41_dppd)
           .Case("sse41.dpps", Intrinsic::x86_sse41_dpps)
           .Case("sse41.mpsadbw", Intrinsic::x86_sse41_mpsadbw)
           .Case("avx.dp.ps.256", Intrinsic::x86_avx_dp_ps_256)
           .Case("avx2.mpsadbw", Intrinsic::x86_avx2_mpsadbw)
           .Default(Intrinsic::not_intrinsic);
  if (ID != Intrinsic::not_intrinsic)
    return upgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);

  if (Name.consume_front("avx512.mask.cmp.")) {
    // Added in 7.0
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("pd.128", Intrinsic::x86_avx512_mask_cmp_pd_128)
             .Case("pd.256", Intrinsic::x86_avx512_mask_cmp_pd_256)
             .Case("pd.512", Intrinsic::x86_avx512_mask_cmp_pd_512)
             .Case("ps.128", Intrinsic::x86_avx512_mask_cmp_ps_128)
             .Case("ps.256", Intrinsic::x86_avx512_mask_cmp_ps_256)
             .Case("ps.512", Intrinsic::x86_avx512_mask_cmp_ps_512)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradeX86MaskedFPCompare(F, ID, NewFn);
    return false; // No other 'x86.avx512.mask.cmp.*'.
  }

  if (Name.consume_front("avx512bf16.")) {
    // Added in 9.0
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("cvtne2ps2bf16.128",
                   Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128)
             .Case("cvtne2ps2bf16.256",
                   Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256)
             .Case("cvtne2ps2bf16.512",
                   Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512)
             .Case("mask.cvtneps2bf16.128",
                   Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
             .Case("cvtneps2bf16.256",
                   Intrinsic::x86_avx512bf16_cvtneps2bf16_256)
             .Case("cvtneps2bf16.512",
                   Intrinsic::x86_avx512bf16_cvtneps2bf16_512)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradeX86BF16Intrinsic(F, ID, NewFn);

    // Added in 9.0
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("dpbf16ps.128", Intrinsic::x86_avx512bf16_dpbf16ps_128)
             .Case("dpbf16ps.256", Intrinsic::x86_avx512bf16_dpbf16ps_256)
             .Case("dpbf16ps.512", Intrinsic::x86_avx512bf16_dpbf16ps_512)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradeX86BF16DPIntrinsic(F, ID, NewFn);
    return false; // No other 'x86.avx512bf16.*'.
  }

  if (Name.consume_front("xop.")) {
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    if (Name.starts_with("vpermil2")) { // Added in 3.9
      // Upgrade any XOP PERMIL2 index operand still using a float/double
      // vector.
      auto Idx = F->getFunctionType()->getParamType(2);
      if (Idx->isFPOrFPVectorTy()) {
        unsigned IdxSize = Idx->getPrimitiveSizeInBits();
        unsigned EltSize = Idx->getScalarSizeInBits();
        if (EltSize == 64 && IdxSize == 128)
          ID = Intrinsic::x86_xop_vpermil2pd;
        else if (EltSize == 32 && IdxSize == 128)
          ID = Intrinsic::x86_xop_vpermil2ps;
        else if (EltSize == 64 && IdxSize == 256)
          ID = Intrinsic::x86_xop_vpermil2pd_256;
        else
          ID = Intrinsic::x86_xop_vpermil2ps_256;
      }
    } else if (F->arg_size() == 2)
      // frcz.ss/sd may need to have an argument dropped. Added in 3.2
      ID = StringSwitch<Intrinsic::ID>(Name)
               .Case("vfrcz.ss", Intrinsic::x86_xop_vfrcz_ss)
               .Case("vfrcz.sd", Intrinsic::x86_xop_vfrcz_sd)
               .Default(Intrinsic::not_intrinsic);

    if (ID != Intrinsic::not_intrinsic) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
      return true;
    }
    return false; // No other 'x86.xop.*'.
  }

  if (Name == "seh.recoverfp") {
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
    return true;
  }

  return false;
}

// Upgrade ARM (IsArm) or Aarch64 (!IsArm) intrinsic functions; returns true if
// an upgrade was found. IsArm: 'arm.*', !IsArm: 'aarch64.*'.
static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
                                                 StringRef Name,
                                                 Function *&NewFn) {
  if (Name.starts_with("rbit")) {
    // '(arm|aarch64).rbit'.
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                      F->arg_begin()->getType());
    return true;
  }

  if (Name == "thread.pointer") {
    // '(arm|aarch64).thread.pointer'.
    NewFn =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
    return true;
  }

  bool Neon = Name.consume_front("neon.");
  if (Neon) {
    // '(arm|aarch64).neon.*'.
    // Changed in 12.0: bfdot accepts v4bf16 and v8bf16 instead of v8i8 and
    // v16i8 respectively.
    if (Name.consume_front("bfdot.")) {
      // '(arm|aarch64).neon.bfdot.*'.
      Intrinsic::ID ID =
          StringSwitch<Intrinsic::ID>(Name)
              .Cases("v2f32.v8i8", "v4f32.v16i8",
                     IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfdot
                           : (Intrinsic::ID)Intrinsic::aarch64_neon_bfdot)
              .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
        assert((OperandWidth == 64 || OperandWidth == 128) &&
               "Unexpected operand width");
        LLVMContext &Ctx = F->getParent()->getContext();
        std::array<Type *, 2> Tys{
            {F->getReturnType(),
             FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)}};
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }
      return false; // No other '(arm|aarch64).neon.bfdot.*'.
    }

    // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic
    // anymore and accept v8bf16 instead of v16i8.
    if (Name.consume_front("bfm")) {
      // '(arm|aarch64).neon.bfm*'.
      if (Name.consume_back(".v4f32.v16i8")) {
        // '(arm|aarch64).neon.bfm*.v4f32.v16i8'.
        Intrinsic::ID ID =
            StringSwitch<Intrinsic::ID>(Name)
                .Case("mla",
                      IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmmla
                            : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmmla)
                .Case("lalb",
                      IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmlalb
                            : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalb)
                .Case("lalt",
                      IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmlalt
                            : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalt)
                .Default(Intrinsic::not_intrinsic);
        if (ID != Intrinsic::not_intrinsic) {
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        return false; // No other '(arm|aarch64).neon.bfm*.v16i8'.
      }
      return false; // No other '(arm|aarch64).neon.bfm*'.
    }
    // Continue on to Aarch64 Neon or Arm Neon.
  }
  // Continue on to Arm or Aarch64.

  if (IsArm) {
    // 'arm.*'.
    if (Neon) {
      // 'arm.neon.*'.
      Intrinsic::ID ID =
          StringSwitch<Intrinsic::ID>(Name)
              .StartsWith("vclz.", Intrinsic::ctlz)
              .StartsWith("vcnt.", Intrinsic::ctpop)
              .StartsWith("vqadds.", Intrinsic::sadd_sat)
              .StartsWith("vqaddu.", Intrinsic::uadd_sat)
              .StartsWith("vqsubs.", Intrinsic::ssub_sat)
              .StartsWith("vqsubu.", Intrinsic::usub_sat)
              .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
                                          F->arg_begin()->getType());
        return true;
      }

      if (Name.consume_front("vst")) {
        // 'arm.neon.vst*'.
        static const Regex vstRegex("^([1234]|[234]lane)\\.v[a-z0-9]*$");
        SmallVector<StringRef, 2> Groups;
        if (vstRegex.match(Name, &Groups)) {
          static const Intrinsic::ID StoreInts[] = {
              Intrinsic::arm_neon_vst1, Intrinsic::arm_neon_vst2,
              Intrinsic::arm_neon_vst3, Intrinsic::arm_neon_vst4};

          static const Intrinsic::ID StoreLaneInts[] = {
              Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
              Intrinsic::arm_neon_vst4lane};

          auto fArgs = F->getFunctionType()->params();
          Type *Tys[] = {fArgs[0], fArgs[1]};
          if (Groups[1].size() == 1)
            NewFn = Intrinsic::getDeclaration(F->getParent(),
                                              StoreInts[fArgs.size() - 3], Tys);
          else
            NewFn = Intrinsic::getDeclaration(
                F->getParent(), StoreLaneInts[fArgs.size() - 5], Tys);
          return true;
        }
        return false; // No other 'arm.neon.vst*'.
      }

      return false; // No other 'arm.neon.*'.
    }

    if (Name.consume_front("mve.")) {
      // 'arm.mve.*'.
      if (Name == "vctp64") {
        if (cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
          // A vctp64 returning a v4i1 is converted to return a v2i1. Rename
          // the function and deal with it below in UpgradeIntrinsicCall.
          rename(F);
          return true;
        }
        return false; // Not 'arm.mve.vctp64'.
      }

      // These too are changed to accept a v2i1 instead of the old v4i1.
      if (Name.consume_back(".v4i1")) {
        // 'arm.mve.*.v4i1'.
        if (Name.consume_back(".predicated.v2i64.v4i32"))
          // 'arm.mve.*.predicated.v2i64.v4i32.v4i1'.
          return Name == "mull.int" || Name == "vqdmull";

        if (Name.consume_back(".v2i64")) {
          // 'arm.mve.*.v2i64.v4i1'.
          bool IsGather = Name.consume_front("vldr.gather.");
          if (IsGather || Name.consume_front("vstr.scatter.")) {
            if (Name.consume_front("base.")) {
              // Optional 'wb.' prefix.
              Name.consume_front("wb.");
              // 'arm.mve.(vldr.gather|vstr.scatter).base.(wb.)?
              // predicated.v2i64.v2i64.v4i1'.
              return Name == "predicated.v2i64";
            }

            if (Name.consume_front("offset.predicated."))
              return Name == (IsGather ? "v2i64.p0i64" : "p0i64.v2i64") ||
                     Name == (IsGather ? "v2i64.p0" : "p0.v2i64");

            // No other 'arm.mve.(vldr.gather|vstr.scatter).*.v2i64.v4i1'.
            return false;
          }

          return false; // No other 'arm.mve.*.v2i64.v4i1'.
        }
        return false; // No other 'arm.mve.*.v4i1'.
      }
      return false; // No other 'arm.mve.*'.
    }

    if (Name.consume_front("cde.vcx")) {
      // 'arm.cde.vcx*'.
      if (Name.consume_back(".predicated.v2i64.v4i1"))
        // 'arm.cde.vcx*.predicated.v2i64.v4i1'.
        return Name == "1q" || Name == "1qa" || Name == "2q" || Name == "2qa" ||
               Name == "3q" || Name == "3qa";

      return false; // No other 'arm.cde.vcx*'.
    }
  } else {
    // 'aarch64.*'.
    if (Neon) {
      // 'aarch64.neon.*'.
      Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                             .StartsWith("frintn", Intrinsic::roundeven)
                             .StartsWith("rbit", Intrinsic::bitreverse)
                             .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
                                          F->arg_begin()->getType());
        return true;
      }

      if (Name.starts_with("addp")) {
        // 'aarch64.neon.addp*'.
        if (F->arg_size() != 2)
          return false; // Invalid IR.
        VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
        if (Ty && Ty->getElementType()->isFloatingPointTy()) {
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::aarch64_neon_faddp, Ty);
          return true;
        }
      }
      return false; // No other 'aarch64.neon.*'.
    }
    if (Name.consume_front("sve.")) {
      // 'aarch64.sve.*'.
      if (Name.consume_front("bf")) {
        if (Name.consume_back(".lane")) {
          // 'aarch64.sve.bf*.lane'.
          Intrinsic::ID ID =
              StringSwitch<Intrinsic::ID>(Name)
                  .Case("dot", Intrinsic::aarch64_sve_bfdot_lane_v2)
                  .Case("mlalb", Intrinsic::aarch64_sve_bfmlalb_lane_v2)
                  .Case("mlalt", Intrinsic::aarch64_sve_bfmlalt_lane_v2)
                  .Default(Intrinsic::not_intrinsic);
          if (ID != Intrinsic::not_intrinsic) {
            NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
            return true;
          }
          return false; // No other 'aarch64.sve.bf*.lane'.
        }
        return false; // No other 'aarch64.sve.bf*'.
      }

      if (Name.consume_front("addqv")) {
        // 'aarch64.sve.addqv'.
        if (!F->getReturnType()->isFPOrFPVectorTy())
          return false;

        auto Args = F->getFunctionType()->params();
        Type *Tys[] = {F->getReturnType(), Args[1]};
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::aarch64_sve_faddqv, Tys);
        return true;
      }

      if (Name.consume_front("ld")) {
        // 'aarch64.sve.ld*'.
        static const Regex LdRegex("^[234](.nxv[a-z0-9]+|$)");
        if (LdRegex.match(Name)) {
          Type *ScalarTy =
              dyn_cast<VectorType>(F->getReturnType())->getElementType();
          ElementCount EC =
              dyn_cast<VectorType>(F->arg_begin()->getType())->getElementCount();
          Type *Ty = VectorType::get(ScalarTy, EC);
          static const Intrinsic::ID LoadIDs[] = {
              Intrinsic::aarch64_sve_ld2_sret,
              Intrinsic::aarch64_sve_ld3_sret,
              Intrinsic::aarch64_sve_ld4_sret,
          };
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            LoadIDs[Name[0] - '2'], Ty);
          return true;
        }
        return false; // No other 'aarch64.sve.ld*'.
      }

      if (Name.consume_front("tuple.")) {
        // 'aarch64.sve.tuple.*'.
        if (Name.starts_with("get")) {
          // 'aarch64.sve.tuple.get*'.
          Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::vector_extract, Tys);
          return true;
        }

        if (Name.starts_with("set")) {
          // 'aarch64.sve.tuple.set*'.
          auto Args = F->getFunctionType()->params();
          Type *Tys[] = {Args[0], Args[2], Args[1]};
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::vector_insert, Tys);
          return true;
        }

        static const Regex CreateTupleRegex("^create[234](.nxv[a-z0-9]+|$)");
        if (CreateTupleRegex.match(Name)) {
          // 'aarch64.sve.tuple.create*'.
          auto Args = F->getFunctionType()->params();
          Type *Tys[] = {F->getReturnType(), Args[1]};
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::vector_insert, Tys);
          return true;
        }
        return false; // No other 'aarch64.sve.tuple.*'.
      }
      return false; // No other 'aarch64.sve.*'.
    }
  }
  return false; // No other 'arm.*', 'aarch64.*'.
}

static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
  if (Name.consume_front("abs."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_abs_bf16)
        .Case("bf16x2", Intrinsic::nvvm_abs_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("fma.rn."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_fma_rn_bf16)
        .Case("bf16x2", Intrinsic::nvvm_fma_rn_bf16x2)
        .Case("ftz.bf16", Intrinsic::nvvm_fma_rn_ftz_bf16)
        .Case("ftz.bf16x2", Intrinsic::nvvm_fma_rn_ftz_bf16x2)
        .Case("ftz.relu.bf16", Intrinsic::nvvm_fma_rn_ftz_relu_bf16)
        .Case("ftz.relu.bf16x2", Intrinsic::nvvm_fma_rn_ftz_relu_bf16x2)
        .Case("ftz.sat.bf16", Intrinsic::nvvm_fma_rn_ftz_sat_bf16)
        .Case("ftz.sat.bf16x2", Intrinsic::nvvm_fma_rn_ftz_sat_bf16x2)
        .Case("relu.bf16", Intrinsic::nvvm_fma_rn_relu_bf16)
        .Case("relu.bf16x2", Intrinsic::nvvm_fma_rn_relu_bf16x2)
        .Case("sat.bf16", Intrinsic::nvvm_fma_rn_sat_bf16)
        .Case("sat.bf16x2", Intrinsic::nvvm_fma_rn_sat_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("fmax."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_fmax_bf16)
        .Case("bf16x2", Intrinsic::nvvm_fmax_bf16x2)
        .Case("ftz.bf16", Intrinsic::nvvm_fmax_ftz_bf16)
        .Case("ftz.bf16x2", Intrinsic::nvvm_fmax_ftz_bf16x2)
        .Case("ftz.nan.bf16", Intrinsic::nvvm_fmax_ftz_nan_bf16)
        .Case("ftz.nan.bf16x2", Intrinsic::nvvm_fmax_ftz_nan_bf16x2)
        .Case("ftz.nan.xorsign.abs.bf16",
              Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16)
        .Case("ftz.nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16x2)
        .Case("ftz.xorsign.abs.bf16", Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16)
        .Case("ftz.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16x2)
        .Case("nan.bf16", Intrinsic::nvvm_fmax_nan_bf16)
        .Case("nan.bf16x2", Intrinsic::nvvm_fmax_nan_bf16x2)
        .Case("nan.xorsign.abs.bf16", Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16)
        .Case("nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16x2)
        .Case("xorsign.abs.bf16", Intrinsic::nvvm_fmax_xorsign_abs_bf16)
        .Case("xorsign.abs.bf16x2", Intrinsic::nvvm_fmax_xorsign_abs_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("fmin."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_fmin_bf16)
        .Case("bf16x2", Intrinsic::nvvm_fmin_bf16x2)
        .Case("ftz.bf16", Intrinsic::nvvm_fmin_ftz_bf16)
        .Case("ftz.bf16x2", Intrinsic::nvvm_fmin_ftz_bf16x2)
        .Case("ftz.nan.bf16", Intrinsic::nvvm_fmin_ftz_nan_bf16)
        .Case("ftz.nan.bf16x2", Intrinsic::nvvm_fmin_ftz_nan_bf16x2)
        .Case("ftz.nan.xorsign.abs.bf16",
              Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16)
        .Case("ftz.nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16x2)
        .Case("ftz.xorsign.abs.bf16", Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16)
        .Case("ftz.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16x2)
        .Case("nan.bf16", Intrinsic::nvvm_fmin_nan_bf16)
        .Case("nan.bf16x2", Intrinsic::nvvm_fmin_nan_bf16x2)
        .Case("nan.xorsign.abs.bf16", Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16)
        .Case("nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16x2)
        .Case("xorsign.abs.bf16", Intrinsic::nvvm_fmin_xorsign_abs_bf16)
        .Case("xorsign.abs.bf16x2", Intrinsic::nvvm_fmin_xorsign_abs_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("neg."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_neg_bf16)
        .Case("bf16x2", Intrinsic::nvvm_neg_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  return Intrinsic::not_intrinsic;
}

static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
                                      bool CanUpgradeDebugIntrinsicsToRecords) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  StringRef Name = F->getName();

  // Quickly eliminate it, if it's not a candidate.
  if (!Name.consume_front("llvm.") || Name.empty())
    return false;

  switch (Name[0]) {
  default: break;
  case 'a': {
    bool IsArm = Name.consume_front("arm.");
    if (IsArm || Name.consume_front("aarch64.")) {
      if (upgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
        return true;
      break;
    }

    if (Name.consume_front("amdgcn.")) {
      if (Name == "alignbit") {
        // Target specific intrinsic became redundant.
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
                                          {F->getReturnType()});
        return true;
      }

      if (Name.consume_front("atomic.")) {
        if (Name.starts_with("inc") || Name.starts_with("dec")) {
          // These were replaced with atomicrmw uinc_wrap and udec_wrap, so
          // there's no new declaration.
          NewFn = nullptr;
          return true;
        }
        break; // No other 'amdgcn.atomic.*'.
      }

      if (Name.starts_with("ldexp.")) {
        // Target specific intrinsic became redundant.
        NewFn = Intrinsic::getDeclaration(
            F->getParent(), Intrinsic::ldexp,
            {F->getReturnType(), F->getArg(1)->getType()});
        return true;
      }
      break; // No other 'amdgcn.*'.
    }

    break;
  }
  case 'c': {
    if (F->arg_size() == 1) {
      Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                             .StartsWith("ctlz.", Intrinsic::ctlz)
                             .StartsWith("cttz.", Intrinsic::cttz)
                             .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
                                          F->arg_begin()->getType());
        return true;
      }
    }

    if (F->arg_size() == 2 && Name.equals("coro.end")) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::coro_end);
      return true;
    }

    break;
  }
  case 'd':
    if (Name.consume_front("dbg.")) {
      // Mark debug intrinsics for upgrade to new debug format.
      if (CanUpgradeDebugIntrinsicsToRecords &&
          F->getParent()->IsNewDbgInfoFormat) {
        if (Name == "addr" || Name == "value" || Name == "assign" ||
            Name == "declare" || Name == "label") {
          // There's no function to replace these with.
          NewFn = nullptr;
          // But we do want these to get upgraded.
          return true;
        }
      }
      // Update llvm.dbg.addr intrinsics even in "new debug mode"; they'll get
      // converted to DbgVariableRecords later.
      if (Name == "addr" || (Name == "value" && F->arg_size() == 4)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
        return true;
      }
      break; // No other 'dbg.*'.
    }
    break;
  case 'e':
    if (Name.consume_front("experimental.vector.")) {
      Intrinsic::ID ID =
          StringSwitch<Intrinsic::ID>(Name)
              .StartsWith("extract.", Intrinsic::vector_extract)
              .StartsWith("insert.", Intrinsic::vector_insert)
              .StartsWith("splice.", Intrinsic::vector_splice)
              .StartsWith("reverse.", Intrinsic::vector_reverse)
              .StartsWith("interleave2.", Intrinsic::vector_interleave2)
              .StartsWith("deinterleave2.", Intrinsic::vector_deinterleave2)
              .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        const auto *FT = F->getFunctionType();
        SmallVector<Type *, 2> Tys;
        if (ID == Intrinsic::vector_extract ||
            ID == Intrinsic::vector_interleave2)
          // Extracting overloads the return type.
          Tys.push_back(FT->getReturnType());
        if (ID != Intrinsic::vector_interleave2)
          Tys.push_back(FT->getParamType(0));
        if (ID == Intrinsic::vector_insert)
          // Inserting overloads the inserted type.
          Tys.push_back(FT->getParamType(1));
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }

      if (Name.consume_front("reduce.")) {
        SmallVector<StringRef, 2> Groups;
        static const Regex R("^([a-z]+)\\.[a-z][0-9]+");
        if (R.match(Name, &Groups))
          ID = StringSwitch<Intrinsic::ID>(Groups[1])
                   .Case("add", Intrinsic::vector_reduce_add)
                   .Case("mul", Intrinsic::vector_reduce_mul)
                   .Case("and", Intrinsic::vector_reduce_and)
                   .Case("or", Intrinsic::vector_reduce_or)
                   .Case("xor", Intrinsic::vector_reduce_xor)
                   .Case("smax", Intrinsic::vector_reduce_smax)
                   .Case("smin", Intrinsic::vector_reduce_smin)
                   .Case("umax", Intrinsic::vector_reduce_umax)
                   .Case("umin", Intrinsic::vector_reduce_umin)
                   .Case("fmax", Intrinsic::vector_reduce_fmax)
                   .Case("fmin", Intrinsic::vector_reduce_fmin)
                   .Default(Intrinsic::not_intrinsic);

        bool V2 = false;
        if (ID == Intrinsic::not_intrinsic) {
          static const Regex R2("^v2\\.([a-z]+)\\.[fi][0-9]+");
          Groups.clear();
          V2 = true;
          if (R2.match(Name, &Groups))
            ID = StringSwitch<Intrinsic::ID>(Groups[1])
                     .Case("fadd", Intrinsic::vector_reduce_fadd)
                     .Case("fmul", Intrinsic::vector_reduce_fmul)
                     .Default(Intrinsic::not_intrinsic);
        }
        if (ID != Intrinsic::not_intrinsic) {
          rename(F);
          auto Args = F->getFunctionType()->params();
          NewFn =
              Intrinsic::getDeclaration(F->getParent(), ID, {Args[V2 ? 1 : 0]});
          return true;
        }
        break; // No other 'experimental.vector.reduce.*'.
      }
      break; // No other 'experimental.vector.*'.
    }
    break; // No other 'e*'.
  case 'f':
    if (Name.starts_with("flt.rounds")) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::get_rounding);
      return true;
    }
    break;
  case 'i':
    if (Name.starts_with("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group.
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(
          F->getParent(), Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }
    break;
  case 'm': {
    // Update the memory intrinsics (memcpy/memmove/memset) that take an
    // explicit alignment parameter to the form that carries the alignment as
    // an attribute on their pointer arguments.
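    // For example (a sketch of the historical five-operand form):
    //   llvm.memcpy.p0i8.p0i8.i64(ptr %d, ptr %s, i64 %n, i32 4, i1 false)
    // loses its i32 alignment operand; the alignment instead survives as
    // align attributes on the two pointer arguments.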
    if (unsigned ID = StringSwitch<unsigned>(Name)
                          .StartsWith("memcpy.", Intrinsic::memcpy)
                          .StartsWith("memmove.", Intrinsic::memmove)
                          .Default(0)) {
      if (F->arg_size() == 5) {
        rename(F);
        // Get the types of dest, src, and len.
        ArrayRef<Type *> ParamTypes =
            F->getFunctionType()->params().slice(0, 3);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ParamTypes);
        return true;
      }
    }
    if (Name.starts_with("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.consume_front("nvvm.")) {
      // Check for nvvm intrinsics corresponding exactly to an LLVM intrinsic.
      if (F->arg_size() == 1) {
        Intrinsic::ID IID =
            StringSwitch<Intrinsic::ID>(Name)
                .Cases("brev32", "brev64", Intrinsic::bitreverse)
                .Case("clz.i", Intrinsic::ctlz)
                .Case("popc.i", Intrinsic::ctpop)
                .Default(Intrinsic::not_intrinsic);
        if (IID != Intrinsic::not_intrinsic) {
          NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                            {F->getReturnType()});
          return true;
        }
      }

      // Check for nvvm intrinsics that need a return type adjustment.
      if (!F->getReturnType()->getScalarType()->isBFloatTy()) {
        Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
        if (IID != Intrinsic::not_intrinsic) {
          NewFn = nullptr;
          return true;
        }
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = false;
      if (Name.consume_front("abs."))
        // nvvm.abs.{i,ll}
        Expand = Name == "i" || Name == "ll";
      else if (Name == "clz.ll" || Name == "popc.ll" || Name == "h2f")
        Expand = true;
      else if (Name.consume_front("max.") || Name.consume_front("min."))
        // nvvm.{min,max}.{s,i,ll,us,ui,ull}
        Expand = Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
                 Name == "ui" || Name == "ull";
      else if (Name.consume_front("atomic.load.add."))
        // nvvm.atomic.load.add.{f32.p,f64.p}
        Expand = Name.starts_with("f32.p") || Name.starts_with("f64.p");
      else
        Expand = false;

      if (Expand) {
        NewFn = nullptr;
        return true;
      }
      break; // No other 'nvvm.*'.
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.starts_with("objectsize.")) {
      Type *Tys[2] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->arg_size() == 2 || F->arg_size() == 3 ||
          F->getName() !=
              Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 'p':
    if (Name.starts_with("ptr.annotation.") && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(
          F->getParent(), Intrinsic::ptr_annotation,
          {F->arg_begin()->getType(), F->getArg(1)->getType()});
      return true;
    }
    break;

  case 'r': {
    if (Name.consume_front("riscv.")) {
      Intrinsic::ID ID;
      ID = StringSwitch<Intrinsic::ID>(Name)
               .Case("aes32dsi", Intrinsic::riscv_aes32dsi)
               .Case("aes32dsmi", Intrinsic::riscv_aes32dsmi)
               .Case("aes32esi", Intrinsic::riscv_aes32esi)
               .Case("aes32esmi", Intrinsic::riscv_aes32esmi)
               .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other applicable upgrades.
      }

      ID = StringSwitch<Intrinsic::ID>(Name)
               .StartsWith("sm4ks", Intrinsic::riscv_sm4ks)
               .StartsWith("sm4ed", Intrinsic::riscv_sm4ed)
               .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32) ||
            F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other applicable upgrades.
      }

      ID = StringSwitch<Intrinsic::ID>(Name)
               .StartsWith("sha256sig0", Intrinsic::riscv_sha256sig0)
               .StartsWith("sha256sig1", Intrinsic::riscv_sha256sig1)
               .StartsWith("sha256sum0", Intrinsic::riscv_sha256sum0)
               .StartsWith("sha256sum1", Intrinsic::riscv_sha256sum1)
               .StartsWith("sm3p0", Intrinsic::riscv_sm3p0)
               .StartsWith("sm3p1", Intrinsic::riscv_sm3p1)
               .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        if (F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other applicable upgrades.
      }
      break; // No other 'riscv.*' intrinsics.
    }
  } break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'v': {
    if (Name == "var.annotation" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(
          F->getParent(), Intrinsic::var_annotation,
          {{F->arg_begin()->getType(), F->getArg(1)->getType()}});
      return true;
    }
    break;
  }

  case 'w':
    if (Name.consume_front("wasm.")) {
      Intrinsic::ID ID =
          StringSwitch<Intrinsic::ID>(Name)
              .StartsWith("fma.", Intrinsic::wasm_relaxed_madd)
              .StartsWith("fms.", Intrinsic::wasm_relaxed_nmadd)
              .StartsWith("laneselect.", Intrinsic::wasm_relaxed_laneselect)
              .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        NewFn =
            Intrinsic::getDeclaration(F->getParent(), ID, F->getReturnType());
        return true;
      }

      if (Name.consume_front("dot.i8x16.i7x16.")) {
        ID = StringSwitch<Intrinsic::ID>(Name)
                 .Case("signed", Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed)
                 .Case("add.signed",
                       Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed)
                 .Default(Intrinsic::not_intrinsic);
        if (ID != Intrinsic::not_intrinsic) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other 'wasm.dot.i8x16.i7x16.*'.
      }
      break; // No other 'wasm.*'.
    }
    break;

  case 'x':
    if (upgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }

  auto *ST = dyn_cast<StructType>(F->getReturnType());
  if (ST && (!ST->isLiteral() || ST->isPacked()) &&
      F->getIntrinsicID() != Intrinsic::not_intrinsic) {
    // Replace return type with literal non-packed struct. Only do this for
    // intrinsics declared to return a struct, not for intrinsics with
    // overloaded return type, in which case the exact struct type will be
    // mangled into the name.
    SmallVector<Intrinsic::IITDescriptor> Desc;
    Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
    if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
      auto *FT = F->getFunctionType();
      auto *NewST = StructType::get(ST->getContext(), ST->elements());
      auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
      std::string Name = F->getName().str();
      rename(F);
      NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
                               Name, F->getParent());

      // The new function may also need remangling.
      if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
        NewFn = *Result;
      return true;
    }
  }

  // Remangle our intrinsic since we upgrade the mangling.
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != std::nullopt) {
    NewFn = *Result;
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn,
                                    bool CanUpgradeDebugIntrinsicsToRecords) {
  NewFn = nullptr;
  bool Upgraded =
      upgradeIntrinsicFunction1(F, NewFn, CanUpgradeDebugIntrinsicsToRecords);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

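// Upgrade the two-field { i32, ptr } entries of llvm.global_ctors and
// llvm.global_dtors to the current three-field form by appending a null
// associated-data pointer to each entry.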
GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
                          GV->getName() == "llvm.global_dtors")) ||
      !GV->hasInitializer())
    return nullptr;
  ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
  if (!ATy)
    return nullptr;
  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
  if (!STy || STy->getNumElements() != 2)
    return nullptr;

  LLVMContext &C = GV->getContext();
  IRBuilder<> IRB(C);
  auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
                               IRB.getPtrTy());
  Constant *Init = GV->getInitializer();
  unsigned N = Init->getNumOperands();
  std::vector<Constant *> NewCtors(N);
  for (unsigned i = 0; i != N; ++i) {
    auto Ctor = cast<Constant>(Init->getOperand(i));
    NewCtors[i] = ConstantStruct::get(EltTy, Ctor->getAggregateElement(0u),
                                      Ctor->getAggregateElement(1),
                                      Constant::getNullValue(IRB.getPtrTy()));
  }
  Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);

  return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
                            NewInit, GV->getName());
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
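// For example, a 128-bit PSLLDQ by 3 zeroes bytes 0-2 and sets byte i to
// Op[i - 3] for i >= 3; wider vectors repeat that pattern in each 16-byte
// lane.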
static Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  auto *ResultTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = ResultTy->getNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    int Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, ArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
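// For example, a 128-bit PSRLDQ by 3 sets byte i to Op[i + 3] for i < 13 and
// zeroes the top three bytes of each 16-byte lane.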
static Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  auto *ResultTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = ResultTy->getNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    int Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, ArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
  auto *MaskTy = FixedVectorType::get(
      Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have fewer than 8 elements (1, 2 or 4), then the starting mask was
  // an i8 and we need to extract down to the right number of elements.
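  // For example, for a 4-element operation the i8 mask is first bitcast to
  // <8 x i1> above, and the shuffle below then keeps only elements 0-3.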
  if (NumElts <= 4) {
    int Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask, ArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
                            Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask,
                       cast<FixedVectorType>(Op0->getType())->getNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
                                  Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
                                      Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics. PALIGNR
// handles large immediates by shifting while VALIGN masks the immediate, so we
// need to handle both cases. VALIGN also doesn't have 128-bit lanes.
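// For example, a 128-bit palignr by 18 is canonicalised below to a shift of 2
// with Op1 replaced by Op0 and Op0 replaced by zeroes.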
1601 Value *Op1, Value *Shift,
1602 Value *Passthru, Value *Mask,
1603 bool IsVALIGN) {
1604 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
1605
1606 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1607 assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
1608 assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
1609 assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
1610
1611 // Mask the immediate for VALIGN.
1612 if (IsVALIGN)
1613 ShiftVal &= (NumElts - 1);
1614
1615 // If palignr is shifting the pair of vectors more than the size of two
1616 // lanes, emit zero.
1617 if (ShiftVal >= 32)
1618 return llvm::Constant::getNullValue(Op0->getType());
1619
1620 // If palignr is shifting the pair of input vectors more than one lane,
1621 // but less than two lanes, convert to shifting in zeroes.
1622 if (ShiftVal > 16) {
1623 ShiftVal -= 16;
1624 Op1 = Op0;
1625 Op0 = llvm::Constant::getNullValue(Op0->getType());
1626 }
1627
1628 int Indices[64];
1629 // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
1630 for (unsigned l = 0; l < NumElts; l += 16) {
1631 for (unsigned i = 0; i != 16; ++i) {
1632 unsigned Idx = ShiftVal + i;
1633 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1634 Idx += NumElts - 16; // End of lane, switch operand.
1635 Indices[l + i] = Idx + l;
1636 }
1637 }
1638
1639 Value *Align = Builder.CreateShuffleVector(
1640 Op1, Op0, ArrayRef(Indices, NumElts), "palignr");
1641
1642 return emitX86Select(Builder, Mask, Align, Passthru);
1643}
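// Illustrative: a 128-bit palignr with Shift == 1 builds indices {1..16},
// i.e. bytes 1..15 of Op1 followed by byte 0 of Op0, matching the
// instruction's concatenate-then-byte-shift-right behavior.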
1644
1645static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
1646 bool ZeroMask, bool IndexForm) {
1647 Type *Ty = CI.getType();
1648 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1649 unsigned EltWidth = Ty->getScalarSizeInBits();
1650 bool IsFloat = Ty->isFPOrFPVectorTy();
1651 Intrinsic::ID IID;
1652 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1653 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1654 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1655 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1656 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1657 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1658 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1659 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1660 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1661 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1662 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1663 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1664 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1665 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1666 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1667 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1668 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1669 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1670 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1671 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1672 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1673 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1674 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1675 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1676 else if (VecWidth == 128 && EltWidth == 16)
1677 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1678 else if (VecWidth == 256 && EltWidth == 16)
1679 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1680 else if (VecWidth == 512 && EltWidth == 16)
1681 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1682 else if (VecWidth == 128 && EltWidth == 8)
1683 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1684 else if (VecWidth == 256 && EltWidth == 8)
1685 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1686 else if (VecWidth == 512 && EltWidth == 8)
1687 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1688 else
1689 llvm_unreachable("Unexpected intrinsic");
1690
1691 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1692 CI.getArgOperand(2) };
1693
1694 // If this isn't index form we need to swap operands 0 and 1.
1695 if (!IndexForm)
1696 std::swap(Args[0], Args[1]);
1697
1698 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1699 Args);
1700 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1701 : Builder.CreateBitCast(CI.getArgOperand(1),
1702 Ty);
1703 return emitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1704}
1705
1706static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
1707 Intrinsic::ID IID) {
1708 Type *Ty = CI.getType();
1709 Value *Op0 = CI.getOperand(0);
1710 Value *Op1 = CI.getOperand(1);
1711 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1712 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1713
1714 if (CI.arg_size() == 4) { // For masked intrinsics.
1715 Value *VecSrc = CI.getOperand(2);
1716 Value *Mask = CI.getOperand(3);
1717 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1718 }
1719 return Res;
1720}
1721
1722static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
1723 bool IsRotateRight) {
1724 Type *Ty = CI.getType();
1725 Value *Src = CI.getArgOperand(0);
1726 Value *Amt = CI.getArgOperand(1);
1727
1728 // The amount may be a scalar immediate, in which case create a splat vector.
1729 // Funnel shift amounts are treated as modulo and types are all power-of-2 so
1730 // we only care about the lowest log2 bits anyway.
1731 if (Amt->getType() != Ty) {
1732 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1733 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1734 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1735 }
1736
1737 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1738 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1739 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1740
1741 if (CI.arg_size() == 4) { // For masked intrinsics.
1742 Value *VecSrc = CI.getOperand(2);
1743 Value *Mask = CI.getOperand(3);
1744 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1745 }
1746 return Res;
1747}
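// Illustrative: a left rotate is emitted as a funnel shift with both vector
// inputs equal, roughly 'call @llvm.fshl(%src, %src, %amt)'.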
1748
1749static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
1750 bool IsSigned) {
1751 Type *Ty = CI.getType();
1752 Value *LHS = CI.getArgOperand(0);
1753 Value *RHS = CI.getArgOperand(1);
1754
1755 CmpInst::Predicate Pred;
1756 switch (Imm) {
1757 case 0x0:
1758 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1759 break;
1760 case 0x1:
1761 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1762 break;
1763 case 0x2:
1764 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1765 break;
1766 case 0x3:
1767 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1768 break;
1769 case 0x4:
1770 Pred = ICmpInst::ICMP_EQ;
1771 break;
1772 case 0x5:
1773 Pred = ICmpInst::ICMP_NE;
1774 break;
1775 case 0x6:
1776 return Constant::getNullValue(Ty); // FALSE
1777 case 0x7:
1778 return Constant::getAllOnesValue(Ty); // TRUE
1779 default:
1780 llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
1781 }
1782
1783 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1784 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1785 return Ext;
1786}
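// E.g. immediate 0x2 with IsSigned yields 'sext(icmp sgt %lhs, %rhs)', while
// immediates 0x6 and 0x7 fold directly to all-zeros/all-ones constants.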
1787
1788static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
1789 bool IsShiftRight, bool ZeroMask) {
1790 Type *Ty = CI.getType();
1791 Value *Op0 = CI.getArgOperand(0);
1792 Value *Op1 = CI.getArgOperand(1);
1793 Value *Amt = CI.getArgOperand(2);
1794
1795 if (IsShiftRight)
1796 std::swap(Op0, Op1);
1797
1798 // The amount may be a scalar immediate, in which case create a splat vector.
1799 // Funnel shift amounts are treated as modulo and types are all power-of-2 so
1800 // we only care about the lowest log2 bits anyway.
1801 if (Amt->getType() != Ty) {
1802 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1803 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1804 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1805 }
1806
1807 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1808 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1809 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1810
1811 unsigned NumArgs = CI.arg_size();
1812 if (NumArgs >= 4) { // For masked intrinsics.
1813 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1814 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1815 CI.getArgOperand(0);
1816 Value *Mask = CI.getOperand(NumArgs - 1);
1817 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1818 }
1819 return Res;
1820}
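// Illustrative: vpshld(a, b, amt) becomes 'call @llvm.fshl(%a, %b, %amt)';
// the shift-right forms swap the two vector operands and use @llvm.fshr.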
1821
1822static Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
1823 Value *Mask, bool Aligned) {
1824 // Cast the pointer to the right type.
1825 Ptr = Builder.CreateBitCast(Ptr,
1826 llvm::PointerType::getUnqual(Data->getType()));
1827 const Align Alignment =
1828 Aligned
1829 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedValue() / 8)
1830 : Align(1);
1831
1832 // If the mask is all ones just emit a regular store.
1833 if (const auto *C = dyn_cast<Constant>(Mask))
1834 if (C->isAllOnesValue())
1835 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1836
1837 // Convert the mask from an integer type to a vector of i1.
1838 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1839 Mask = getX86MaskVec(Builder, Mask, NumElts);
1840 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1841}
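// Illustrative: an avx512.mask.storeu call becomes '@llvm.masked.store' with
// align 1; the aligned variants use the full vector size as the alignment,
// and an all-ones mask degenerates to a plain store (handled above).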
1842
1843static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
1844 Value *Passthru, Value *Mask, bool Aligned) {
1845 Type *ValTy = Passthru->getType();
1846 // Cast the pointer to the right type.
1847 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1848 const Align Alignment =
1849 Aligned
1850 ? Align(
1851 Passthru->getType()->getPrimitiveSizeInBits().getFixedValue() /
1852 8)
1853 : Align(1);
1854
1855 // If the mask is all ones just emit a regular load.
1856 if (const auto *C = dyn_cast<Constant>(Mask))
1857 if (C->isAllOnesValue())
1858 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1859
1860 // Convert the mask from an integer type to a vector of i1.
1861 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1862 Mask = getX86MaskVec(Builder, Mask, NumElts);
1863 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1864}
1865
1866static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
1867 Type *Ty = CI.getType();
1868 Value *Op0 = CI.getArgOperand(0);
1869 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1870 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1871 if (CI.arg_size() == 3)
1872 Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1873 return Res;
1874}
1875
1876static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
1877 Type *Ty = CI.getType();
1878
1879 // Arguments have a vXi32 type so cast to vXi64.
1880 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1881 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1882
1883 if (IsSigned) {
1884 // Shift left then arithmetic shift right.
1885 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1886 LHS = Builder.CreateShl(LHS, ShiftAmt);
1887 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1888 RHS = Builder.CreateShl(RHS, ShiftAmt);
1889 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1890 } else {
1891 // Clear the upper bits.
1892 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1893 LHS = Builder.CreateAnd(LHS, Mask);
1894 RHS = Builder.CreateAnd(RHS, Mask);
1895 }
1896
1897 Value *Res = Builder.CreateMul(LHS, RHS);
1898
1899 if (CI.arg_size() == 4)
1900 Res = emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1901
1902 return Res;
1903}
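// Illustrative: pmuldq multiplies the low (even) 32-bit half of each 64-bit
// lane, so after the vXi64 bitcast the shl/ashr-by-32 pair sign-extends
// those low halves; pmuludq instead zero-extends by masking with 0xffffffff.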
1904
1905// Apply a mask to a vector of i1s and make sure the result is at least 8 bits wide.
1906static Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1907 Value *Mask) {
1908 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1909 if (Mask) {
1910 const auto *C = dyn_cast<Constant>(Mask);
1911 if (!C || !C->isAllOnesValue())
1912 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1913 }
1914
1915 if (NumElts < 8) {
1916 int Indices[8];
1917 for (unsigned i = 0; i != NumElts; ++i)
1918 Indices[i] = i;
1919 for (unsigned i = NumElts; i != 8; ++i)
1920 Indices[i] = NumElts + i % NumElts;
1921 Vec = Builder.CreateShuffleVector(Vec,
1922 Constant::getNullValue(Vec->getType()),
1923 Indices);
1924 }
1925 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1926}
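// Illustrative: a <4 x i1> compare result is widened to <8 x i1> with the
// padding lanes taken from the zero vector, then bitcast to i8 to match the
// old intrinsic's scalar mask return type.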
1927
1928static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
1929 unsigned CC, bool Signed) {
1930 Value *Op0 = CI.getArgOperand(0);
1931 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1932
1933 Value *Cmp;
1934 if (CC == 3) {
1935 Cmp = Constant::getNullValue(
1936 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1937 } else if (CC == 7) {
1938 Cmp = Constant::getAllOnesValue(
1939 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1940 } else {
1941 ICmpInst::Predicate Pred;
1942 switch (CC) {
1943 default: llvm_unreachable("Unknown condition code");
1944 case 0: Pred = ICmpInst::ICMP_EQ; break;
1945 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1946 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1947 case 4: Pred = ICmpInst::ICMP_NE; break;
1948 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1949 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1950 }
1951 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1952 }
1953
1954 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1955
1956 return applyX86MaskOn1BitsVec(Builder, Cmp, Mask);
1957}
1958
1959// Replace a masked intrinsic with an older unmasked intrinsic.
1960static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
1961 Intrinsic::ID IID) {
1962 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1963 Value *Rep = Builder.CreateCall(Intrin,
1964 { CI.getArgOperand(0), CI.getArgOperand(1) });
1965 return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1966}
1967
1968static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
1969 Value* A = CI.getArgOperand(0);
1970 Value* B = CI.getArgOperand(1);
1971 Value* Src = CI.getArgOperand(2);
1972 Value* Mask = CI.getArgOperand(3);
1973
1974 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1975 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1976 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1977 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1978 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1979 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1980}
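// Illustrative (pseudo-IR): only bit 0 of the mask matters here, so this
// emits roughly
//   %c = icmp ne (%mask & 1), 0
//   %r = insertelement %a, select(%c, %b[0], %src[0]), 0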
1981
1982static Value* upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
1983 Value* Op = CI.getArgOperand(0);
1984 Type* ReturnOp = CI.getType();
1985 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1986 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1987 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1988}
1989
1990// Replace intrinsic with unmasked version and a select.
1991static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1992 CallBase &CI, Value *&Rep) {
1993 Name = Name.substr(12); // Remove avx512.mask.
1994
1995 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1996 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1997 Intrinsic::ID IID;
1998 if (Name.starts_with("max.p")) {
1999 if (VecWidth == 128 && EltWidth == 32)
2000 IID = Intrinsic::x86_sse_max_ps;
2001 else if (VecWidth == 128 && EltWidth == 64)
2002 IID = Intrinsic::x86_sse2_max_pd;
2003 else if (VecWidth == 256 && EltWidth == 32)
2004 IID = Intrinsic::x86_avx_max_ps_256;
2005 else if (VecWidth == 256 && EltWidth == 64)
2006 IID = Intrinsic::x86_avx_max_pd_256;
2007 else
2008 llvm_unreachable("Unexpected intrinsic");
2009 } else if (Name.starts_with("min.p")) {
2010 if (VecWidth == 128 && EltWidth == 32)
2011 IID = Intrinsic::x86_sse_min_ps;
2012 else if (VecWidth == 128 && EltWidth == 64)
2013 IID = Intrinsic::x86_sse2_min_pd;
2014 else if (VecWidth == 256 && EltWidth == 32)
2015 IID = Intrinsic::x86_avx_min_ps_256;
2016 else if (VecWidth == 256 && EltWidth == 64)
2017 IID = Intrinsic::x86_avx_min_pd_256;
2018 else
2019 llvm_unreachable("Unexpected intrinsic");
2020 } else if (Name.starts_with("pshuf.b.")) {
2021 if (VecWidth == 128)
2022 IID = Intrinsic::x86_ssse3_pshuf_b_128;
2023 else if (VecWidth == 256)
2024 IID = Intrinsic::x86_avx2_pshuf_b;
2025 else if (VecWidth == 512)
2026 IID = Intrinsic::x86_avx512_pshuf_b_512;
2027 else
2028 llvm_unreachable("Unexpected intrinsic");
2029 } else if (Name.starts_with("pmul.hr.sw.")) {
2030 if (VecWidth == 128)
2031 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
2032 else if (VecWidth == 256)
2033 IID = Intrinsic::x86_avx2_pmul_hr_sw;
2034 else if (VecWidth == 512)
2035 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
2036 else
2037 llvm_unreachable("Unexpected intrinsic");
2038 } else if (Name.starts_with("pmulh.w.")) {
2039 if (VecWidth == 128)
2040 IID = Intrinsic::x86_sse2_pmulh_w;
2041 else if (VecWidth == 256)
2042 IID = Intrinsic::x86_avx2_pmulh_w;
2043 else if (VecWidth == 512)
2044 IID = Intrinsic::x86_avx512_pmulh_w_512;
2045 else
2046 llvm_unreachable("Unexpected intrinsic");
2047 } else if (Name.starts_with("pmulhu.w.")) {
2048 if (VecWidth == 128)
2049 IID = Intrinsic::x86_sse2_pmulhu_w;
2050 else if (VecWidth == 256)
2051 IID = Intrinsic::x86_avx2_pmulhu_w;
2052 else if (VecWidth == 512)
2053 IID = Intrinsic::x86_avx512_pmulhu_w_512;
2054 else
2055 llvm_unreachable("Unexpected intrinsic");
2056 } else if (Name.starts_with("pmaddw.d.")) {
2057 if (VecWidth == 128)
2058 IID = Intrinsic::x86_sse2_pmadd_wd;
2059 else if (VecWidth == 256)
2060 IID = Intrinsic::x86_avx2_pmadd_wd;
2061 else if (VecWidth == 512)
2062 IID = Intrinsic::x86_avx512_pmaddw_d_512;
2063 else
2064 llvm_unreachable("Unexpected intrinsic");
2065 } else if (Name.starts_with("pmaddubs.w.")) {
2066 if (VecWidth == 128)
2067 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
2068 else if (VecWidth == 256)
2069 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
2070 else if (VecWidth == 512)
2071 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
2072 else
2073 llvm_unreachable("Unexpected intrinsic");
2074 } else if (Name.starts_with("packsswb.")) {
2075 if (VecWidth == 128)
2076 IID = Intrinsic::x86_sse2_packsswb_128;
2077 else if (VecWidth == 256)
2078 IID = Intrinsic::x86_avx2_packsswb;
2079 else if (VecWidth == 512)
2080 IID = Intrinsic::x86_avx512_packsswb_512;
2081 else
2082 llvm_unreachable("Unexpected intrinsic");
2083 } else if (Name.starts_with("packssdw.")) {
2084 if (VecWidth == 128)
2085 IID = Intrinsic::x86_sse2_packssdw_128;
2086 else if (VecWidth == 256)
2087 IID = Intrinsic::x86_avx2_packssdw;
2088 else if (VecWidth == 512)
2089 IID = Intrinsic::x86_avx512_packssdw_512;
2090 else
2091 llvm_unreachable("Unexpected intrinsic");
2092 } else if (Name.starts_with("packuswb.")) {
2093 if (VecWidth == 128)
2094 IID = Intrinsic::x86_sse2_packuswb_128;
2095 else if (VecWidth == 256)
2096 IID = Intrinsic::x86_avx2_packuswb;
2097 else if (VecWidth == 512)
2098 IID = Intrinsic::x86_avx512_packuswb_512;
2099 else
2100 llvm_unreachable("Unexpected intrinsic");
2101 } else if (Name.starts_with("packusdw.")) {
2102 if (VecWidth == 128)
2103 IID = Intrinsic::x86_sse41_packusdw;
2104 else if (VecWidth == 256)
2105 IID = Intrinsic::x86_avx2_packusdw;
2106 else if (VecWidth == 512)
2107 IID = Intrinsic::x86_avx512_packusdw_512;
2108 else
2109 llvm_unreachable("Unexpected intrinsic");
2110 } else if (Name.starts_with("vpermilvar.")) {
2111 if (VecWidth == 128 && EltWidth == 32)
2112 IID = Intrinsic::x86_avx_vpermilvar_ps;
2113 else if (VecWidth == 128 && EltWidth == 64)
2114 IID = Intrinsic::x86_avx_vpermilvar_pd;
2115 else if (VecWidth == 256 && EltWidth == 32)
2116 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
2117 else if (VecWidth == 256 && EltWidth == 64)
2118 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
2119 else if (VecWidth == 512 && EltWidth == 32)
2120 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
2121 else if (VecWidth == 512 && EltWidth == 64)
2122 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
2123 else
2124 llvm_unreachable("Unexpected intrinsic");
2125 } else if (Name == "cvtpd2dq.256") {
2126 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
2127 } else if (Name == "cvtpd2ps.256") {
2128 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
2129 } else if (Name == "cvttpd2dq.256") {
2130 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
2131 } else if (Name == "cvttps2dq.128") {
2132 IID = Intrinsic::x86_sse2_cvttps2dq;
2133 } else if (Name == "cvttps2dq.256") {
2134 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
2135 } else if (Name.starts_with("permvar.")) {
2136 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
2137 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
2138 IID = Intrinsic::x86_avx2_permps;
2139 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
2140 IID = Intrinsic::x86_avx2_permd;
2141 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
2142 IID = Intrinsic::x86_avx512_permvar_df_256;
2143 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
2144 IID = Intrinsic::x86_avx512_permvar_di_256;
2145 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
2146 IID = Intrinsic::x86_avx512_permvar_sf_512;
2147 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
2148 IID = Intrinsic::x86_avx512_permvar_si_512;
2149 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
2150 IID = Intrinsic::x86_avx512_permvar_df_512;
2151 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
2152 IID = Intrinsic::x86_avx512_permvar_di_512;
2153 else if (VecWidth == 128 && EltWidth == 16)
2154 IID = Intrinsic::x86_avx512_permvar_hi_128;
2155 else if (VecWidth == 256 && EltWidth == 16)
2156 IID = Intrinsic::x86_avx512_permvar_hi_256;
2157 else if (VecWidth == 512 && EltWidth == 16)
2158 IID = Intrinsic::x86_avx512_permvar_hi_512;
2159 else if (VecWidth == 128 && EltWidth == 8)
2160 IID = Intrinsic::x86_avx512_permvar_qi_128;
2161 else if (VecWidth == 256 && EltWidth == 8)
2162 IID = Intrinsic::x86_avx512_permvar_qi_256;
2163 else if (VecWidth == 512 && EltWidth == 8)
2164 IID = Intrinsic::x86_avx512_permvar_qi_512;
2165 else
2166 llvm_unreachable("Unexpected intrinsic");
2167 } else if (Name.starts_with("dbpsadbw.")) {
2168 if (VecWidth == 128)
2169 IID = Intrinsic::x86_avx512_dbpsadbw_128;
2170 else if (VecWidth == 256)
2171 IID = Intrinsic::x86_avx512_dbpsadbw_256;
2172 else if (VecWidth == 512)
2173 IID = Intrinsic::x86_avx512_dbpsadbw_512;
2174 else
2175 llvm_unreachable("Unexpected intrinsic");
2176 } else if (Name.starts_with("pmultishift.qb.")) {
2177 if (VecWidth == 128)
2178 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
2179 else if (VecWidth == 256)
2180 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
2181 else if (VecWidth == 512)
2182 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
2183 else
2184 llvm_unreachable("Unexpected intrinsic");
2185 } else if (Name.starts_with("conflict.")) {
2186 if (Name[9] == 'd' && VecWidth == 128)
2187 IID = Intrinsic::x86_avx512_conflict_d_128;
2188 else if (Name[9] == 'd' && VecWidth == 256)
2189 IID = Intrinsic::x86_avx512_conflict_d_256;
2190 else if (Name[9] == 'd' && VecWidth == 512)
2191 IID = Intrinsic::x86_avx512_conflict_d_512;
2192 else if (Name[9] == 'q' && VecWidth == 128)
2193 IID = Intrinsic::x86_avx512_conflict_q_128;
2194 else if (Name[9] == 'q' && VecWidth == 256)
2195 IID = Intrinsic::x86_avx512_conflict_q_256;
2196 else if (Name[9] == 'q' && VecWidth == 512)
2197 IID = Intrinsic::x86_avx512_conflict_q_512;
2198 else
2199 llvm_unreachable("Unexpected intrinsic");
2200 } else if (Name.starts_with("pavg.")) {
2201 if (Name[5] == 'b' && VecWidth == 128)
2202 IID = Intrinsic::x86_sse2_pavg_b;
2203 else if (Name[5] == 'b' && VecWidth == 256)
2204 IID = Intrinsic::x86_avx2_pavg_b;
2205 else if (Name[5] == 'b' && VecWidth == 512)
2206 IID = Intrinsic::x86_avx512_pavg_b_512;
2207 else if (Name[5] == 'w' && VecWidth == 128)
2208 IID = Intrinsic::x86_sse2_pavg_w;
2209 else if (Name[5] == 'w' && VecWidth == 256)
2210 IID = Intrinsic::x86_avx2_pavg_w;
2211 else if (Name[5] == 'w' && VecWidth == 512)
2212 IID = Intrinsic::x86_avx512_pavg_w_512;
2213 else
2214 llvm_unreachable("Unexpected intrinsic");
2215 } else
2216 return false;
2217
2218 SmallVector<Value *, 4> Args(CI.args());
2219 Args.pop_back();
2220 Args.pop_back();
2221 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
2222 Args);
2223 unsigned NumArgs = CI.arg_size();
2224 Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
2225 CI.getArgOperand(NumArgs - 2));
2226 return true;
2227}
2228
2229/// Upgrade the comment in a call to inline asm that represents an objc
2230/// retain/release marker.
2231void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
2232 size_t Pos;
2233 if (AsmStr->find("mov\tfp") == 0 &&
2234 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
2235 (Pos = AsmStr->find("# marker")) != std::string::npos) {
2236 AsmStr->replace(Pos, 1, ";");
2237 }
2238}
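// Illustrative: for a matching asm string this rewrites "# marker" to
// "; marker", leaving the rest of the string untouched.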
2239
2240static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
2241 IRBuilder<> &Builder) {
2242 if (Name == "mve.vctp64.old") {
2243 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
2244 // correct type.
2245 Value *VCTP = Builder.CreateCall(
2246 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
2247 CI->getArgOperand(0), CI->getName());
2248 Value *C1 = Builder.CreateCall(
2249 Intrinsic::getDeclaration(
2250 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2251 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
2252 VCTP);
2253 return Builder.CreateCall(
2254 Intrinsic::getDeclaration(
2255 F->getParent(), Intrinsic::arm_mve_pred_i2v,
2256 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2257 C1);
2258 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
2259 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
2260 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
2261 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
2262 Name ==
2263 "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
2264 Name == "mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v4i1" ||
2265 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
2266 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
2267 Name ==
2268 "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
2269 Name == "mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v4i1" ||
2270 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
2271 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
2272 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
2273 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
2274 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
2275 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
2276 std::vector<Type *> Tys;
2277 unsigned ID = CI->getIntrinsicID();
2278 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
2279 switch (ID) {
2280 case Intrinsic::arm_mve_mull_int_predicated:
2281 case Intrinsic::arm_mve_vqdmull_predicated:
2282 case Intrinsic::arm_mve_vldr_gather_base_predicated:
2283 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
2284 break;
2285 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
2286 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
2287 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
2288 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
2289 V2I1Ty};
2290 break;
2291 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
2292 Tys = {CI->getType(), CI->getOperand(0)->getType(),
2293 CI->getOperand(1)->getType(), V2I1Ty};
2294 break;
2295 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
2296 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
2297 CI->getOperand(2)->getType(), V2I1Ty};
2298 break;
2299 case Intrinsic::arm_cde_vcx1q_predicated:
2300 case Intrinsic::arm_cde_vcx1qa_predicated:
2301 case Intrinsic::arm_cde_vcx2q_predicated:
2302 case Intrinsic::arm_cde_vcx2qa_predicated:
2303 case Intrinsic::arm_cde_vcx3q_predicated:
2304 case Intrinsic::arm_cde_vcx3qa_predicated:
2305 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
2306 break;
2307 default:
2308 llvm_unreachable("Unhandled Intrinsic!");
2309 }
2310
2311 std::vector<Value *> Ops;
2312 for (Value *Op : CI->args()) {
2313 Type *Ty = Op->getType();
2314 if (Ty->getScalarSizeInBits() == 1) {
2315 Value *C1 = Builder.CreateCall(
2316 Intrinsic::getDeclaration(
2317 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2318 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2319 Op);
2320 Op = Builder.CreateCall(
2321 Intrinsic::getDeclaration(F->getParent(),
2322 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
2323 C1);
2324 }
2325 Ops.push_back(Op);
2326 }
2327
2328 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
2329 return Builder.CreateCall(Fn, Ops, CI->getName());
2330 }
2331 llvm_unreachable("Unknown function for ARM CallBase upgrade.");
2332}
2333
2334static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
2335 Function *F, IRBuilder<> &Builder) {
2336 const bool IsInc = Name.starts_with("atomic.inc.");
2337 if (IsInc || Name.starts_with("atomic.dec.")) {
2338 if (CI->getNumOperands() != 6) // Malformed bitcode.
2339 return nullptr;
2340
2341 AtomicRMWInst::BinOp RMWOp =
2342 IsInc ? AtomicRMWInst::UIncWrap : AtomicRMWInst::UDecWrap;
2343
2344 Value *Ptr = CI->getArgOperand(0);
2345 Value *Val = CI->getArgOperand(1);
2346 ConstantInt *OrderArg = dyn_cast<ConstantInt>(CI->getArgOperand(2));
2347 ConstantInt *VolatileArg = dyn_cast<ConstantInt>(CI->getArgOperand(4));
2348
2349 AtomicOrdering Order = AtomicOrdering::SequentiallyConsistent;
2350 if (OrderArg && isValidAtomicOrdering(OrderArg->getZExtValue()))
2351 Order = static_cast<AtomicOrdering>(OrderArg->getZExtValue());
2352 if (Order == AtomicOrdering::NotAtomic ||
2353 Order == AtomicOrdering::Unordered)
2354 Order = AtomicOrdering::SequentiallyConsistent;
2355
2356 // The scope argument never really worked correctly. Use agent as the most
2357 // conservative option, which should still always produce the instruction.
2358 SyncScope::ID SSID = F->getContext().getOrInsertSyncScopeID("agent");
2359 AtomicRMWInst *RMW =
2360 Builder.CreateAtomicRMW(RMWOp, Ptr, Val, std::nullopt, Order, SSID);
2361
2362 if (!VolatileArg || !VolatileArg->isZero())
2363 RMW->setVolatile(true);
2364 return RMW;
2365 }
2366
2367 llvm_unreachable("Unknown function for AMDGPU intrinsic upgrade.");
2368}
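// Illustrative: an old amdgcn atomic.inc call becomes roughly
//   atomicrmw volatile uinc_wrap ptr %p, %v syncscope("agent") seq_cst
// with the ordering taken from the old operand when it was valid.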
2369
2370/// Helper to unwrap intrinsic call MetadataAsValue operands.
2371template <typename MDType>
2372static MDType *unwrapMAVOp(CallBase *CI, unsigned Op) {
2373 if (MetadataAsValue *MAV = dyn_cast<MetadataAsValue>(CI->getArgOperand(Op)))
2374 return dyn_cast<MDType>(MAV->getMetadata());
2375 return nullptr;
2376}
2377
2378/// Convert debug intrinsic calls to non-instruction debug records.
2379/// \p Name - Final part of the intrinsic name, e.g. 'value' in llvm.dbg.value.
2380/// \p CI - The debug intrinsic call.
2381static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
2382 DbgRecord *DR = nullptr;
2383 if (Name == "label") {
2384 DR = new DbgLabelRecord(unwrapMAVOp<DILabel>(CI, 0), CI->getDebugLoc());
2385 } else if (Name == "assign") {
2386 DR = new DbgVariableRecord(
2387 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, 1),
2388 unwrapMAVOp<DIExpression>(CI, 2), unwrapMAVOp<DIAssignID>(CI, 3),
2389 unwrapMAVOp<Metadata>(CI, 4), unwrapMAVOp<DIExpression>(CI, 5),
2390 CI->getDebugLoc());
2391 } else if (Name == "declare") {
2392 DR = new DbgVariableRecord(
2393 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, 1),
2394 unwrapMAVOp<DIExpression>(CI, 2), CI->getDebugLoc(),
2395 DbgVariableRecord::LocationType::Declare);
2396 } else if (Name == "addr") {
2397 // Upgrade dbg.addr to dbg.value with DW_OP_deref.
2398 DIExpression *Expr = unwrapMAVOp<DIExpression>(CI, 2);
2399 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2400 DR = new DbgVariableRecord(unwrapMAVOp<Metadata>(CI, 0),
2401 unwrapMAVOp<DILocalVariable>(CI, 1), Expr,
2402 CI->getDebugLoc());
2403 } else if (Name == "value") {
2404 // An old version of dbg.value had an extra offset argument.
2405 unsigned VarOp = 1;
2406 unsigned ExprOp = 2;
2407 if (CI->arg_size() == 4) {
2408 auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1));
2409 // Nonzero offset dbg.values get dropped without a replacement.
2410 if (!Offset || !Offset->isZeroValue())
2411 return;
2412 VarOp = 2;
2413 ExprOp = 3;
2414 }
2415 DR = new DbgVariableRecord(
2416 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, VarOp),
2417 unwrapMAVOp<DIExpression>(CI, ExprOp), CI->getDebugLoc());
2418 }
2419 assert(DR && "Unhandled intrinsic kind in upgrade to DbgRecord");
2420 CI->getParent()->insertDbgRecordBefore(DR, CI->getIterator());
2421}
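// Illustrative: 'call void @llvm.dbg.value(metadata i32 %x, metadata !10,
// metadata !DIExpression())' becomes (roughly) the non-instruction record
// '#dbg_value(i32 %x, !10, !DIExpression(), !loc)' inserted before CI.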
2422
2423/// Upgrade a call to an old intrinsic. All argument and return casting must be
2424/// provided to seamlessly integrate with existing context.
2425void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
2426 // Note dyn_cast to Function is not quite the same as getCalledFunction, which
2427 // checks the callee's function type matches. It's likely we need to handle
2428 // type changes here.
2429 Function *F = dyn_cast<Function>(CI->getCalledOperand());
2430 if (!F)
2431 return;
2432
2433 LLVMContext &C = CI->getContext();
2434 IRBuilder<> Builder(C);
2435 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
2436
2437 if (!NewFn) {
2438 bool FallthroughToDefaultUpgrade = false;
2439 // Get the Function's name.
2440 StringRef Name = F->getName();
2441
2442 assert(Name.starts_with("llvm.") && "Intrinsic doesn't start with 'llvm.'");
2443 Name = Name.substr(5);
2444
2445 bool IsX86 = Name.consume_front("x86.");
2446 bool IsNVVM = Name.consume_front("nvvm.");
2447 bool IsARM = Name.consume_front("arm.");
2448 bool IsAMDGCN = Name.consume_front("amdgcn.");
2449 bool IsDbg = Name.consume_front("dbg.");
2450
2451 if (IsX86 && Name.starts_with("sse4a.movnt.")) {
2452 SmallVector<Metadata *, 1> Elts;
2453 Elts.push_back(
2454 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2455 MDNode *Node = MDNode::get(C, Elts);
2456
2457 Value *Arg0 = CI->getArgOperand(0);
2458 Value *Arg1 = CI->getArgOperand(1);
2459
2460 // Nontemporal (unaligned) store of the 0'th element of the float/double
2461 // vector.
2462 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
2463 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
2464 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
2465 Value *Extract =
2466 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
2467
2468 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
2469 SI->setMetadata(LLVMContext::MD_nontemporal, Node);
2470
2471 // Remove intrinsic.
2472 CI->eraseFromParent();
2473 return;
2474 }
2475
2476 if (IsX86 && (Name.starts_with("avx.movnt.") ||
2477 Name.starts_with("avx512.storent."))) {
2478 SmallVector<Metadata *, 1> Elts;
2479 Elts.push_back(
2480 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2481 MDNode *Node = MDNode::get(C, Elts);
2482
2483 Value *Arg0 = CI->getArgOperand(0);
2484 Value *Arg1 = CI->getArgOperand(1);
2485
2486 // Convert the type of the pointer to a pointer to the stored type.
2487 Value *BC = Builder.CreateBitCast(Arg0,
2488 PointerType::getUnqual(Arg1->getType()),
2489 "cast");
2490 StoreInst *SI = Builder.CreateAlignedStore(
2491 Arg1, BC,
2492 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
2493 SI->setMetadata(LLVMContext::MD_nontemporal, Node);
2494
2495 // Remove intrinsic.
2496 CI->eraseFromParent();
2497 return;
2498 }
2499
2500 if (IsX86 && Name == "sse2.storel.dq") {
2501 Value *Arg0 = CI->getArgOperand(0);
2502 Value *Arg1 = CI->getArgOperand(1);
2503
2504 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2505 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2506 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2507 Value *BC = Builder.CreateBitCast(Arg0,
2508 PointerType::getUnqual(Elt->getType()),
2509 "cast");
2510 Builder.CreateAlignedStore(Elt, BC, Align(1));
2511
2512 // Remove intrinsic.
2513 CI->eraseFromParent();
2514 return;
2515 }
2516
2517 if (IsX86 && (Name.starts_with("sse.storeu.") ||
2518 Name.starts_with("sse2.storeu.") ||
2519 Name.starts_with("avx.storeu."))) {
2520 Value *Arg0 = CI->getArgOperand(0);
2521 Value *Arg1 = CI->getArgOperand(1);
2522
2523 Arg0 = Builder.CreateBitCast(Arg0,
2524 PointerType::getUnqual(Arg1->getType()),
2525 "cast");
2526 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2527
2528 // Remove intrinsic.
2529 CI->eraseFromParent();
2530 return;
2531 }
2532
2533 if (IsX86 && Name == "avx512.mask.store.ss") {
2534 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2535 upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2536 Mask, false);
2537
2538 // Remove intrinsic.
2539 CI->eraseFromParent();
2540 return;
2541 }
2542
2543 if (IsX86 && Name.starts_with("avx512.mask.store")) {
2544 // "avx512.mask.storeu." or "avx512.mask.store."
2545 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2546 upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2547 CI->getArgOperand(2), Aligned);
2548
2549 // Remove intrinsic.
2550 CI->eraseFromParent();
2551 return;
2552 }
2553
2554 Value *Rep = nullptr;
2555 // Upgrade packed integer vector compare intrinsics to compare instructions.
2556 if (IsX86 && (Name.starts_with("sse2.pcmp") ||
2557 Name.starts_with("avx2.pcmp"))) {
2558 // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
2559 bool CmpEq = Name[9] == 'e';
2560 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2561 CI->getArgOperand(0), CI->getArgOperand(1));
2562 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2563 } else if (IsX86 && (Name.starts_with("avx512.broadcastm"))) {
2564 Type *ExtTy = Type::getInt32Ty(C);
2565 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2566 ExtTy = Type::getInt64Ty(C);
2567 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2568 ExtTy->getPrimitiveSizeInBits();
2569 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2570 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2571 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2572 Name == "sse2.sqrt.sd")) {
2573 Value *Vec = CI->getArgOperand(0);
2574 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2575 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2576 Intrinsic::sqrt, Elt0->getType());
2577 Elt0 = Builder.CreateCall(Intr, Elt0);
2578 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2579 } else if (IsX86 && (Name.starts_with("avx.sqrt.p") ||
2580 Name.starts_with("sse2.sqrt.p") ||
2581 Name.starts_with("sse.sqrt.p"))) {
2582 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2583 Intrinsic::sqrt,
2584 CI->getType()),
2585 {CI->getArgOperand(0)});
2586 } else if (IsX86 && (Name.starts_with("avx512.mask.sqrt.p"))) {
2587 if (CI->arg_size() == 4 &&
2588 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2589 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2590 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2591 : Intrinsic::x86_avx512_sqrt_pd_512;
2592
2593 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2594 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2595 IID), Args);
2596 } else {
2597 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2598 Intrinsic::sqrt,
2599 CI->getType()),
2600 {CI->getArgOperand(0)});
2601 }
2602 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2603 CI->getArgOperand(1));
2604 } else if (IsX86 && (Name.starts_with("avx512.ptestm") ||
2605 Name.starts_with("avx512.ptestnm"))) {
2606 Value *Op0 = CI->getArgOperand(0);
2607 Value *Op1 = CI->getArgOperand(1);
2608 Value *Mask = CI->getArgOperand(2);
2609 Rep = Builder.CreateAnd(Op0, Op1);
2610 llvm::Type *Ty = Op0->getType();
2611 Value *Zero = llvm::Constant::getNullValue(Ty);
2612 ICmpInst::Predicate Pred =
2613 Name.starts_with("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2614 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2615 Rep = applyX86MaskOn1BitsVec(Builder, Rep, Mask);
2616 } else if (IsX86 && (Name.starts_with("avx512.mask.pbroadcast"))){
2617 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2618 ->getNumElements();
2619 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2620 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2621 CI->getArgOperand(1));
2622 } else if (IsX86 && (Name.starts_with("avx512.kunpck"))) {
2623 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2624 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2625 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2626 int Indices[64];
2627 for (unsigned i = 0; i != NumElts; ++i)
2628 Indices[i] = i;
2629
2630 // First extract half of each vector. This gives better codegen than
2631 // doing it in a single shuffle.
2632 LHS =
2633 Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
2634 RHS =
2635 Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
2636 // Concat the vectors.
2637 // NOTE: Operands have to be swapped to match intrinsic definition.
2638 Rep = Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
2639 Rep = Builder.CreateBitCast(Rep, CI->getType());
2640 } else if (IsX86 && Name == "avx512.kand.w") {
2641 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2642 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2643 Rep = Builder.CreateAnd(LHS, RHS);
2644 Rep = Builder.CreateBitCast(Rep, CI->getType());
2645 } else if (IsX86 && Name == "avx512.kandn.w") {
2646 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2647 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2648 LHS = Builder.CreateNot(LHS);
2649 Rep = Builder.CreateAnd(LHS, RHS);
2650 Rep = Builder.CreateBitCast(Rep, CI->getType());
2651 } else if (IsX86 && Name == "avx512.kor.w") {
2652 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2653 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2654 Rep = Builder.CreateOr(LHS, RHS);
2655 Rep = Builder.CreateBitCast(Rep, CI->getType());
2656 } else if (IsX86 && Name == "avx512.kxor.w") {
2657 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2658 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2659 Rep = Builder.CreateXor(LHS, RHS);
2660 Rep = Builder.CreateBitCast(Rep, CI->getType());
2661 } else if (IsX86 && Name == "avx512.kxnor.w") {
2662 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2663 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2664 LHS = Builder.CreateNot(LHS);
2665 Rep = Builder.CreateXor(LHS, RHS);
2666 Rep = Builder.CreateBitCast(Rep, CI->getType());
2667 } else if (IsX86 && Name == "avx512.knot.w") {
2668 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2669 Rep = Builder.CreateNot(Rep);
2670 Rep = Builder.CreateBitCast(Rep, CI->getType());
2671 } else if (IsX86 &&
2672 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2673 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2674 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2675 Rep = Builder.CreateOr(LHS, RHS);
2676 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2677 Value *C;
2678 if (Name[14] == 'c')
2679 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2680 else
2681 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2682 Rep = Builder.CreateICmpEQ(Rep, C);
2683 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2684 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2685 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2686 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2687 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2688 Type *I32Ty = Type::getInt32Ty(C);
2689 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2690 ConstantInt::get(I32Ty, 0));
2691 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2692 ConstantInt::get(I32Ty, 0));
2693 Value *EltOp;
2694 if (Name.contains(".add."))
2695 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2696 else if (Name.contains(".sub."))
2697 EltOp = Builder.CreateFSub(Elt0, Elt1);
2698 else if (Name.contains(".mul."))
2699 EltOp = Builder.CreateFMul(Elt0, Elt1);
2700 else
2701 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2702 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2703 ConstantInt::get(I32Ty, 0));
2704 } else if (IsX86 && Name.starts_with("avx512.mask.pcmp")) {
2705 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2706 bool CmpEq = Name[16] == 'e';
2707 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2708 } else if (IsX86 && Name.starts_with("avx512.mask.vpshufbitqmb.")) {
2709 Type *OpTy = CI->getArgOperand(0)->getType();
2710 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2711 Intrinsic::ID IID;
2712 switch (VecWidth) {
2713 default: llvm_unreachable("Unexpected intrinsic");
2714 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2715 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2716 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2717 }
2718
2719 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2720 { CI->getOperand(0), CI->getArgOperand(1) });
2721 Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2722 } else if (IsX86 && Name.starts_with("avx512.mask.fpclass.p")) {
2723 Type *OpTy = CI->getArgOperand(0)->getType();
2724 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2725 unsigned EltWidth = OpTy->getScalarSizeInBits();
2726 Intrinsic::ID IID;
2727 if (VecWidth == 128 && EltWidth == 32)
2728 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2729 else if (VecWidth == 256 && EltWidth == 32)
2730 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2731 else if (VecWidth == 512 && EltWidth == 32)
2732 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2733 else if (VecWidth == 128 && EltWidth == 64)
2734 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2735 else if (VecWidth == 256 && EltWidth == 64)
2736 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2737 else if (VecWidth == 512 && EltWidth == 64)
2738 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2739 else
2740 llvm_unreachable("Unexpected intrinsic");
2741
2742 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2743 { CI->getOperand(0), CI->getArgOperand(1) });
2744 Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2745 } else if (IsX86 && Name.starts_with("avx512.cmp.p")) {
2746 SmallVector<Value *, 4> Args(CI->args());
2747 Type *OpTy = Args[0]->getType();
2748 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2749 unsigned EltWidth = OpTy->getScalarSizeInBits();
2750 Intrinsic::ID IID;
2751 if (VecWidth == 128 && EltWidth == 32)
2752 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2753 else if (VecWidth == 256 && EltWidth == 32)
2754 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2755 else if (VecWidth == 512 && EltWidth == 32)
2756 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2757 else if (VecWidth == 128 && EltWidth == 64)
2758 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2759 else if (VecWidth == 256 && EltWidth == 64)
2760 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2761 else if (VecWidth == 512 && EltWidth == 64)
2762 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2763 else
2764 llvm_unreachable("Unexpected intrinsic");
2765
2766 Value *Mask = Constant::getAllOnesValue(CI->getType());
2767 if (VecWidth == 512)
2768 std::swap(Mask, Args.back());
2769 Args.push_back(Mask);
2770
2771 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2772 Args);
2773 } else if (IsX86 && Name.starts_with("avx512.mask.cmp.")) {
2774 // Integer compare intrinsics.
2775 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2776 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2777 } else if (IsX86 && Name.starts_with("avx512.mask.ucmp.")) {
2778 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2779 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2780 } else if (IsX86 && (Name.starts_with("avx512.cvtb2mask.") ||
2781 Name.starts_with("avx512.cvtw2mask.") ||
2782 Name.starts_with("avx512.cvtd2mask.") ||
2783 Name.starts_with("avx512.cvtq2mask."))) {
2784 Value *Op = CI->getArgOperand(0);
2785 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2786 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2787 Rep = applyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2788 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2789 Name == "ssse3.pabs.w.128" ||
2790 Name == "ssse3.pabs.d.128" ||
2791 Name.starts_with("avx2.pabs") ||
2792 Name.starts_with("avx512.mask.pabs"))) {
2793 Rep = upgradeAbs(Builder, *CI);
2794 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2795 Name == "sse2.pmaxs.w" ||
2796 Name == "sse41.pmaxsd" ||
2797 Name.starts_with("avx2.pmaxs") ||
2798 Name.starts_with("avx512.mask.pmaxs"))) {
2799 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2800 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2801 Name == "sse41.pmaxuw" ||
2802 Name == "sse41.pmaxud" ||
2803 Name.starts_with("avx2.pmaxu") ||
2804 Name.starts_with("avx512.mask.pmaxu"))) {
2805 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2806 } else if (IsX86 && (Name == "sse41.pminsb" ||
2807 Name == "sse2.pmins.w" ||
2808 Name == "sse41.pminsd" ||
2809 Name.starts_with("avx2.pmins") ||
2810 Name.starts_with("avx512.mask.pmins"))) {
2811 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2812 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2813 Name == "sse41.pminuw" ||
2814 Name == "sse41.pminud" ||
2815 Name.starts_with("avx2.pminu") ||
2816 Name.starts_with("avx512.mask.pminu"))) {
2817 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2818 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2819 Name == "avx2.pmulu.dq" ||
2820 Name == "avx512.pmulu.dq.512" ||
2821 Name.starts_with("avx512.mask.pmulu.dq."))) {
2822 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2823 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2824 Name == "avx2.pmul.dq" ||
2825 Name == "avx512.pmul.dq.512" ||
2826 Name.starts_with("avx512.mask.pmul.dq."))) {
2827 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2828 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2829 Name == "sse2.cvtsi2sd" ||
2830 Name == "sse.cvtsi642ss" ||
2831 Name == "sse2.cvtsi642sd")) {
2832 Rep = Builder.CreateSIToFP(
2833 CI->getArgOperand(1),
2834 cast<VectorType>(CI->getType())->getElementType());
2835 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2836 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2837 Rep = Builder.CreateUIToFP(
2838 CI->getArgOperand(1),
2839 cast<VectorType>(CI->getType())->getElementType());
2840 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2841 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2842 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2843 Rep = Builder.CreateFPExt(
2844 Rep, cast<VectorType>(CI->getType())->getElementType());
2845 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2846 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2847 Name == "sse2.cvtdq2ps" ||
2848 Name == "avx.cvtdq2.pd.256" ||
2849 Name == "avx.cvtdq2.ps.256" ||
2850 Name.starts_with("avx512.mask.cvtdq2pd.") ||
2851 Name.starts_with("avx512.mask.cvtudq2pd.") ||
2852 Name.starts_with("avx512.mask.cvtdq2ps.") ||
2853 Name.starts_with("avx512.mask.cvtudq2ps.") ||
2854 Name.starts_with("avx512.mask.cvtqq2pd.") ||
2855 Name.starts_with("avx512.mask.cvtuqq2pd.") ||
2856 Name == "avx512.mask.cvtqq2ps.256" ||
2857 Name == "avx512.mask.cvtqq2ps.512" ||
2858 Name == "avx512.mask.cvtuqq2ps.256" ||
2859 Name == "avx512.mask.cvtuqq2ps.512" ||
2860 Name == "sse2.cvtps2pd" ||
2861 Name == "avx.cvt.ps2.pd.256" ||
2862 Name == "avx512.mask.cvtps2pd.128" ||
2863 Name == "avx512.mask.cvtps2pd.256")) {
2864 auto *DstTy = cast<FixedVectorType>(CI->getType());
2865 Rep = CI->getArgOperand(0);
2866 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2867
2868 unsigned NumDstElts = DstTy->getNumElements();
2869 if (NumDstElts < SrcTy->getNumElements()) {
2870 assert(NumDstElts == 2 && "Unexpected vector size");
2871 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2872 }
2873
2874 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2875 bool IsUnsigned = Name.contains("cvtu");
2876 if (IsPS2PD)
2877 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2878 else if (CI->arg_size() == 4 &&
2879 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2880 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2881 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2882 : Intrinsic::x86_avx512_sitofp_round;
2883 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2884 { DstTy, SrcTy });
2885 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2886 } else {
2887 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2888 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2889 }
2890
2891 if (CI->arg_size() >= 3)
2892 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2893 CI->getArgOperand(1));
2894 } else if (IsX86 && (Name.starts_with("avx512.mask.vcvtph2ps.") ||
2895 Name.starts_with("vcvtph2ps."))) {
2896 auto *DstTy = cast<FixedVectorType>(CI->getType());
2897 Rep = CI->getArgOperand(0);
2898 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2899 unsigned NumDstElts = DstTy->getNumElements();
2900 if (NumDstElts != SrcTy->getNumElements()) {
2901 assert(NumDstElts == 4 && "Unexpected vector size");
2902 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2903 }
2904 Rep = Builder.CreateBitCast(
2905 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2906 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2907 if (CI->arg_size() >= 3)
2908 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2909 CI->getArgOperand(1));
2910 } else if (IsX86 && Name.starts_with("avx512.mask.load")) {
2911 // "avx512.mask.loadu." or "avx512.mask.load."
2912 bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
2913 Rep =
2914 upgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2915 CI->getArgOperand(2), Aligned);
2916 } else if (IsX86 && Name.starts_with("avx512.mask.expand.load.")) {
2917 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2918 Type *PtrTy = ResultTy->getElementType();
2919
2920 // Cast the pointer to element type.
2921 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2922 llvm::PointerType::getUnqual(PtrTy));
2923
2924 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2925 ResultTy->getNumElements());
2926
2927 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2928 Intrinsic::masked_expandload,
2929 ResultTy);
2930 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2931 } else if (IsX86 && Name.starts_with("avx512.mask.compress.store.")) {
2932 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2933 Type *PtrTy = ResultTy->getElementType();
2934
2935 // Cast the pointer to element type.
2936 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2937 llvm::PointerType::getUnqual(PtrTy));
2938
2939 Value *MaskVec =
2940 getX86MaskVec(Builder, CI->getArgOperand(2),
2941 cast<FixedVectorType>(ResultTy)->getNumElements());
2942
2943 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2944 Intrinsic::masked_compressstore,
2945 ResultTy);
2946 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2947 } else if (IsX86 && (Name.starts_with("avx512.mask.compress.") ||
2948 Name.starts_with("avx512.mask.expand."))) {
2949 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2950
2951 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2952 ResultTy->getNumElements());
2953
2954 bool IsCompress = Name[12] == 'c';
2955 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2956 : Intrinsic::x86_avx512_mask_expand;
2957 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2958 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2959 MaskVec });
2960 } else if (IsX86 && Name.starts_with("xop.vpcom")) {
2961 bool IsSigned;
2962 if (Name.ends_with("ub") || Name.ends_with("uw") || Name.ends_with("ud") ||
2963 Name.ends_with("uq"))
2964 IsSigned = false;
2965 else if (Name.ends_with("b") || Name.ends_with("w") || Name.ends_with("d") ||
2966 Name.ends_with("q"))
2967 IsSigned = true;
2968 else
2969 llvm_unreachable("Unknown suffix");
2970
2971 unsigned Imm;
2972 if (CI->arg_size() == 3) {
2973 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2974 } else {
2975 Name = Name.substr(9); // strip off "xop.vpcom"
2976 if (Name.starts_with("lt"))
2977 Imm = 0;
2978 else if (Name.starts_with("le"))
2979 Imm = 1;
2980 else if (Name.starts_with("gt"))
2981 Imm = 2;
2982 else if (Name.starts_with("ge"))
2983 Imm = 3;
2984 else if (Name.starts_with("eq"))
2985 Imm = 4;
2986 else if (Name.starts_with("ne"))
2987 Imm = 5;
2988 else if (Name.starts_with("false"))
2989 Imm = 6;
2990 else if (Name.starts_with("true"))
2991 Imm = 7;
2992 else
2993 llvm_unreachable("Unknown condition");
2994 }
2995
2996 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2997 } else if (IsX86 && Name.starts_with("xop.vpcmov")) {
2998 Value *Sel = CI->getArgOperand(2);
2999 Value *NotSel = Builder.CreateNot(Sel);
3000 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
3001 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
3002 Rep = Builder.CreateOr(Sel0, Sel1);
3003 } else if (IsX86 && (Name.starts_with("xop.vprot") ||
3004 Name.starts_with("avx512.prol") ||
3005 Name.starts_with("avx512.mask.prol"))) {
3006 Rep = upgradeX86Rotate(Builder, *CI, false);
3007 } else if (IsX86 && (Name.starts_with("avx512.pror") ||
3008 Name.starts_with("avx512.mask.pror"))) {
3009 Rep = upgradeX86Rotate(Builder, *CI, true);
3010 } else if (IsX86 && (Name.starts_with("avx512.vpshld.") ||
3011 Name.starts_with("avx512.mask.vpshld") ||
3012 Name.starts_with("avx512.maskz.vpshld"))) {
3013 bool ZeroMask = Name[11] == 'z';
3014 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
3015 } else if (IsX86 && (Name.starts_with("avx512.vpshrd.") ||
3016 Name.starts_with("avx512.mask.vpshrd") ||
3017 Name.starts_with("avx512.maskz.vpshrd"))) {
3018 bool ZeroMask = Name[11] == 'z';
3019 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
3020 } else if (IsX86 && Name == "sse42.crc32.64.8") {
3021 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
3022 Intrinsic::x86_sse42_crc32_32_8);
3023 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
3024 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
3025 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
3026 } else if (IsX86 && (Name.starts_with("avx.vbroadcast.s") ||
3027 Name.starts_with("avx512.vbroadcast.s"))) {
3028 // Replace broadcasts with a series of insertelements.
3029 auto *VecTy = cast<FixedVectorType>(CI->getType());
3030 Type *EltTy = VecTy->getElementType();
3031 unsigned EltNum = VecTy->getNumElements();
3032 Value *Load = Builder.CreateLoad(EltTy, CI->getArgOperand(0));
3033 Type *I32Ty = Type::getInt32Ty(C);
3034 Rep = PoisonValue::get(VecTy);
3035 for (unsigned I = 0; I < EltNum; ++I)
3036 Rep = Builder.CreateInsertElement(Rep, Load,
3037 ConstantInt::get(I32Ty, I));
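 // For a <4 x float> broadcast, the loop above emits IR along these lines
 // (illustrative):
 //   %ld = load float, ptr %p
 //   %v0 = insertelement <4 x float> poison, float %ld, i32 0
 //   ...
 //   %v3 = insertelement <4 x float> %v2, float %ld, i32 3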
3038 } else if (IsX86 && (Name.starts_with("sse41.pmovsx") ||
3039 Name.starts_with("sse41.pmovzx") ||
3040 Name.starts_with("avx2.pmovsx") ||
3041 Name.starts_with("avx2.pmovzx") ||
3042 Name.starts_with("avx512.mask.pmovsx") ||
3043 Name.starts_with("avx512.mask.pmovzx"))) {
3044 auto *DstTy = cast<FixedVectorType>(CI->getType());
3045 unsigned NumDstElts = DstTy->getNumElements();
3046
3047 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
3048 SmallVector<int, 8> ShuffleMask(NumDstElts);
3049 for (unsigned i = 0; i != NumDstElts; ++i)
3050 ShuffleMask[i] = i;
3051
3052 Value *SV =
3053 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
3054
3055 bool DoSext = Name.contains("pmovsx");
3056 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
3057 : Builder.CreateZExt(SV, DstTy);
3058 // If there are 3 arguments, it's a masked intrinsic so we need a select.
3059 if (CI->arg_size() == 3)
3060 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3061 CI->getArgOperand(1));
3062 } else if (Name == "avx512.mask.pmov.qd.256" ||
3063 Name == "avx512.mask.pmov.qd.512" ||
3064 Name == "avx512.mask.pmov.wb.256" ||
3065 Name == "avx512.mask.pmov.wb.512") {
3066 Type *Ty = CI->getArgOperand(1)->getType();
3067 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
3068 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3069 CI->getArgOperand(1));
3070 } else if (IsX86 && (Name.starts_with("avx.vbroadcastf128") ||
3071 Name == "avx2.vbroadcasti128")) {
3072 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
3073 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
3074 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
3075 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
3076 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
3077 PointerType::getUnqual(VT));
3078 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
3079 if (NumSrcElts == 2)
3080 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
3081 else
3082 Rep = Builder.CreateShuffleVector(
3083 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
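 // For example, avx.vbroadcastf128.ps.256 loads a <4 x float> and the mask
 // <0, 1, 2, 3, 0, 1, 2, 3> repeats it into both 128-bit halves of the
 // <8 x float> result.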
3084 } else if (IsX86 && (Name.starts_with("avx512.mask.shuf.i") ||
3085 Name.starts_with("avx512.mask.shuf.f"))) {
3086 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3087 Type *VT = CI->getType();
3088 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
3089 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
3090 unsigned ControlBitsMask = NumLanes - 1;
3091 unsigned NumControlBits = NumLanes / 2;
3092 SmallVector<int, 8> ShuffleMask(0);
3093
3094 for (unsigned l = 0; l != NumLanes; ++l) {
3095 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
3096 // Lanes in the upper half of the result select from the other source.
3097 if (l >= NumLanes / 2)
3098 LaneMask += NumLanes;
3099 for (unsigned i = 0; i != NumElementsInLane; ++i)
3100 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
3101 }
3102 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
3103 CI->getArgOperand(1), ShuffleMask);
3104 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3105 CI->getArgOperand(3));
3106 } else if (IsX86 && (Name.starts_with("avx512.mask.broadcastf") ||
3107 Name.starts_with("avx512.mask.broadcasti"))) {
3108 unsigned NumSrcElts =
3109 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
3110 ->getNumElements();
3111 unsigned NumDstElts =
3112 cast<FixedVectorType>(CI->getType())->getNumElements();
3113
3114 SmallVector<int, 8> ShuffleMask(NumDstElts);
3115 for (unsigned i = 0; i != NumDstElts; ++i)
3116 ShuffleMask[i] = i % NumSrcElts;
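 // For example, broadcasting a <4 x float> source into a <16 x float>
 // destination tiles the mask as <0, 1, 2, 3, 0, 1, 2, 3, ...>.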
3117
3118 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
3119 CI->getArgOperand(0),
3120 ShuffleMask);
3121 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3122 CI->getArgOperand(1));
3123 } else if (IsX86 && (Name.starts_with("avx2.pbroadcast") ||
3124 Name.starts_with("avx2.vbroadcast") ||
3125 Name.starts_with("avx512.pbroadcast") ||
3126 Name.starts_with("avx512.mask.broadcast.s"))) {
3127 // Replace vp?broadcasts with a vector shuffle.
3128 Value *Op = CI->getArgOperand(0);
3129 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
3130 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
3131 SmallVector<int, 8> M;
3132 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
3133 Rep = Builder.CreateShuffleVector(Op, M);
3134
3135 if (CI->arg_size() == 3)
3136 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3137 CI->getArgOperand(1));
3138 } else if (IsX86 && (Name.starts_with("sse2.padds.") ||
3139 Name.starts_with("avx2.padds.") ||
3140 Name.starts_with("avx512.padds.") ||
3141 Name.starts_with("avx512.mask.padds."))) {
3142 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
3143 } else if (IsX86 && (Name.starts_with("sse2.psubs.") ||
3144 Name.starts_with("avx2.psubs.") ||
3145 Name.starts_with("avx512.psubs.") ||
3146 Name.starts_with("avx512.mask.psubs."))) {
3147 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
3148 } else if (IsX86 && (Name.starts_with("sse2.paddus.") ||
3149 Name.starts_with("avx2.paddus.") ||
3150 Name.starts_with("avx512.mask.paddus."))) {
3151 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
3152 } else if (IsX86 && (Name.starts_with("sse2.psubus.") ||
3153 Name.starts_with("avx2.psubus.") ||
3154 Name.starts_with("avx512.mask.psubus."))) {
3155 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
3156 } else if (IsX86 && Name.starts_with("avx512.mask.palignr.")) {
3157 Rep = upgradeX86ALIGNIntrinsics(
3158 Builder, CI->getArgOperand(0), CI->getArgOperand(1),
3159 CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
3160 false);
3161 } else if (IsX86 && Name.starts_with("avx512.mask.valign.")) {
3162 Rep = upgradeX86ALIGNIntrinsics(
3163 Builder, CI->getArgOperand(0), CI->getArgOperand(1),
3164 CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
3165 true);
3166 } else if (IsX86 && (Name == "sse2.psll.dq" ||
3167 Name == "avx2.psll.dq")) {
3168 // 128/256-bit shift left specified in bits.
3169 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3170 Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
3171 Shift / 8); // Shift is in bits.
3172 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
3173 Name == "avx2.psrl.dq")) {
3174 // 128/256-bit shift right specified in bits.
3175 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3176 Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
3177 Shift / 8); // Shift is in bits.
3178 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
3179 Name == "avx2.psll.dq.bs" ||
3180 Name == "avx512.psll.dq.512")) {
3181 // 128/256/512-bit shift left specified in bytes.
3182 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3183 Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
3184 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
3185 Name == "avx2.psrl.dq.bs" ||
3186 Name == "avx512.psrl.dq.512")) {
3187 // 128/256/512-bit shift right specified in bytes.
3188 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3189 Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
3190 } else if (IsX86 && (Name == "sse41.pblendw" ||
3191 Name.starts_with("sse41.blendp") ||
3192 Name.starts_with("avx.blend.p") ||
3193 Name == "avx2.pblendw" ||
3194 Name.starts_with("avx2.pblendd."))) {
3195 Value *Op0 = CI->getArgOperand(0);
3196 Value *Op1 = CI->getArgOperand(1);
3197 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3198 auto *VecTy = cast<FixedVectorType>(CI->getType());
3199 unsigned NumElts = VecTy->getNumElements();
3200
3201 SmallVector<int, 16> Idxs(NumElts);
3202 for (unsigned i = 0; i != NumElts; ++i)
3203 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
3204
3205 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
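 // For example, sse41.pblendw with Imm = 0x0F on <8 x i16> operands gives
 // Idxs = <8, 9, 10, 11, 4, 5, 6, 7>: the low four lanes come from Op1
 // (indices offset by NumElts), the rest from Op0.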
3206 } else if (IsX86 && (Name.starts_with("avx.vinsertf128.") ||
3207 Name == "avx2.vinserti128" ||
3208 Name.starts_with("avx512.mask.insert"))) {
3209 Value *Op0 = CI->getArgOperand(0);
3210 Value *Op1 = CI->getArgOperand(1);
3211 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3212 unsigned DstNumElts =
3213 cast<FixedVectorType>(CI->getType())->getNumElements();
3214 unsigned SrcNumElts =
3215 cast<FixedVectorType>(Op1->getType())->getNumElements();
3216 unsigned Scale = DstNumElts / SrcNumElts;
3217
3218 // Mask off the high bits of the immediate value; hardware ignores those.
3219 Imm = Imm % Scale;
3220
3221 // Extend the second operand into a vector the size of the destination.
3222 SmallVector<int, 8> Idxs(DstNumElts);
3223 for (unsigned i = 0; i != SrcNumElts; ++i)
3224 Idxs[i] = i;
3225 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
3226 Idxs[i] = SrcNumElts;
3227 Rep = Builder.CreateShuffleVector(Op1, Idxs);
3228
3229 // Insert the second operand into the first operand.
3230
3231 // Note that there is no guarantee that instruction lowering will actually
3232 // produce a vinsertf128 instruction for the created shuffles. In
3233 // particular, the 0 immediate case involves no lane changes, so it can
3234 // be handled as a blend.
3235
3236 // Example of shuffle mask for 32-bit elements:
3237 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
3238 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
3239
3240 // First fill with the identity mask.
3241 for (unsigned i = 0; i != DstNumElts; ++i)
3242 Idxs[i] = i;
3243 // Then replace the elements where we need to insert.
3244 for (unsigned i = 0; i != SrcNumElts; ++i)
3245 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
3246 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
3247
3248 // If the intrinsic has a mask operand, handle that.
3249 if (CI->arg_size() == 5)
3250 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3251 CI->getArgOperand(3));
3252 } else if (IsX86 && (Name.starts_with("avx.vextractf128.") ||
3253 Name == "avx2.vextracti128" ||
3254 Name.starts_with("avx512.mask.vextract"))) {
3255 Value *Op0 = CI->getArgOperand(0);
3256 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3257 unsigned DstNumElts =
3258 cast<FixedVectorType>(CI->getType())->getNumElements();
3259 unsigned SrcNumElts =
3260 cast<FixedVectorType>(Op0->getType())->getNumElements();
3261 unsigned Scale = SrcNumElts / DstNumElts;
3262
3263 // Mask off the high bits of the immediate value; hardware ignores those.
3264 Imm = Imm % Scale;
3265
3266 // Get indexes for the subvector of the input vector.
3267 SmallVector<int, 8> Idxs(DstNumElts);
3268 for (unsigned i = 0; i != DstNumElts; ++i) {
3269 Idxs[i] = i + (Imm * DstNumElts);
3270 }
3271 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
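 // For example, avx.vextractf128 with Imm = 1 on an <8 x float> source
 // selects Idxs = <4, 5, 6, 7>, i.e. the upper 128-bit half.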
3272
3273 // If the intrinsic has a mask operand, handle that.
3274 if (CI->arg_size() == 4)
3275 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3276 CI->getArgOperand(2));
3277 } else if (!IsX86 && Name == "stackprotectorcheck") {
3278 Rep = nullptr;
3279 } else if (IsX86 && (Name.starts_with("avx512.mask.perm.df.") ||
3280 Name.starts_with("avx512.mask.perm.di."))) {
3281 Value *Op0 = CI->getArgOperand(0);
3282 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3283 auto *VecTy = cast<FixedVectorType>(CI->getType());
3284 unsigned NumElts = VecTy->getNumElements();
3285
3286 SmallVector<int, 8> Idxs(NumElts);
3287 for (unsigned i = 0; i != NumElts; ++i)
3288 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
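 // For example, Imm = 0x1B (0b00011011) reverses each group of four
 // elements: Idxs becomes <3, 2, 1, 0> within every group.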
3289
3290 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3291
3292 if (CI->arg_size() == 4)
3293 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3294 CI->getArgOperand(2));
3295 } else if (IsX86 && (Name.starts_with("avx.vperm2f128.") ||
3296 Name == "avx2.vperm2i128")) {
3297 // The immediate permute control byte looks like this:
3298 // [1:0] - select 128 bits from sources for low half of destination
3299 // [2] - ignore
3300 // [3] - zero low half of destination
3301 // [5:4] - select 128 bits from sources for high half of destination
3302 // [6] - ignore
3303 // [7] - zero high half of destination
3304
3305 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3306
3307 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3308 unsigned HalfSize = NumElts / 2;
3309 SmallVector<int, 8> ShuffleMask(NumElts);
3310
3311 // Determine which operand(s) are actually in use for this instruction.
3312 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
3313 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
3314
3315 // If needed, replace operands based on zero mask.
3316 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
3317 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
3318
3319 // Permute low half of result.
3320 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
3321 for (unsigned i = 0; i < HalfSize; ++i)
3322 ShuffleMask[i] = StartIndex + i;
3323
3324 // Permute high half of result.
3325 StartIndex = (Imm & 0x10) ? HalfSize : 0;
3326 for (unsigned i = 0; i < HalfSize; ++i)
3327 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
3328
3329 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
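 // For example, Imm = 0x21 takes the high half of source 0 for the low
 // result half (bits [1:0] = 1) and the low half of source 1 for the high
 // result half (bits [5:4] = 2), i.e. a one-lane shift across the
 // concatenated sources.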
3330
3331 } else if (IsX86 && (Name.starts_with("avx.vpermil.") ||
3332 Name == "sse2.pshuf.d" ||
3333 Name.starts_with("avx512.mask.vpermil.p") ||
3334 Name.starts_with("avx512.mask.pshuf.d."))) {
3335 Value *Op0 = CI->getArgOperand(0);
3336 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3337 auto *VecTy = cast<FixedVectorType>(CI->getType());
3338 unsigned NumElts = VecTy->getNumElements();
3339 // Calculate the size of each index in the immediate.
3340 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
3341 unsigned IdxMask = ((1 << IdxSize) - 1);
3342
3343 SmallVector<int, 8> Idxs(NumElts);
3344 // Look up the bits for this element, wrapping around the immediate every
3345 // 8 bits. Elements are grouped into sets of 2 or 4 elements so we need
3346 // to offset by the first index of each group.
3347 for (unsigned i = 0; i != NumElts; ++i)
3348 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
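 // For example, sse2.pshuf.d with Imm = 0xE4 (0b11100100) has IdxSize 2;
 // each element reads its own 2-bit field, so Idxs is the identity
 // <0, 1, 2, 3>.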
3349
3350 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3351
3352 if (CI->arg_size() == 4)
3353 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3354 CI->getArgOperand(2));
3355 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
3356 Name.starts_with("avx512.mask.pshufl.w."))) {
3357 Value *Op0 = CI->getArgOperand(0);
3358 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3359 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3360
3361 SmallVector<int, 16> Idxs(NumElts);
3362 for (unsigned l = 0; l != NumElts; l += 8) {
3363 for (unsigned i = 0; i != 4; ++i)
3364 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
3365 for (unsigned i = 4; i != 8; ++i)
3366 Idxs[i + l] = i + l;
3367 }
3368
3369 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3370
3371 if (CI->arg_size() == 4)
3372 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3373 CI->getArgOperand(2));
3374 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
3375 Name.starts_with("avx512.mask.pshufh.w."))) {
3376 Value *Op0 = CI->getArgOperand(0);
3377 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3378 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3379
3380 SmallVector<int, 16> Idxs(NumElts);
3381 for (unsigned l = 0; l != NumElts; l += 8) {
3382 for (unsigned i = 0; i != 4; ++i)
3383 Idxs[i + l] = i + l;
3384 for (unsigned i = 0; i != 4; ++i)
3385 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
3386 }
3387
3388 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3389
3390 if (CI->arg_size() == 4)
3391 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3392 CI->getArgOperand(2));
3393 } else if (IsX86 && Name.starts_with("avx512.mask.shuf.p")) {
3394 Value *Op0 = CI->getArgOperand(0);
3395 Value *Op1 = CI->getArgOperand(1);
3396 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3397 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3398
3399 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3400 unsigned HalfLaneElts = NumLaneElts / 2;
3401
3402 SmallVector<int, 16> Idxs(NumElts);
3403 for (unsigned i = 0; i != NumElts; ++i) {
3404 // Base index is the starting element of the lane.
3405 Idxs[i] = i - (i % NumLaneElts);
3406 // If we are half way through the lane switch to the other source.
3407 if ((i % NumLaneElts) >= HalfLaneElts)
3408 Idxs[i] += NumElts;
3409 // Now select the specific element by adding HalfLaneElts bits from
3410 // the immediate, wrapping around the immediate every 8 bits.
3411 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
3412 }
3413
3414 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
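 // For example, a shufps-style call with Imm = 0xB1 on <4 x float>
 // operands yields Idxs = <1, 0, 7, 6>: a swapped pair from Op0 followed
 // by a swapped pair from Op1.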
3415
3416 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3417 CI->getArgOperand(3));
3418 } else if (IsX86 && (Name.starts_with("avx512.mask.movddup") ||
3419 Name.starts_with("avx512.mask.movshdup") ||
3420 Name.starts_with("avx512.mask.movsldup"))) {
3421 Value *Op0 = CI->getArgOperand(0);
3422 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3423 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3424
3425 unsigned Offset = 0;
3426 if (Name.starts_with("avx512.mask.movshdup."))
3427 Offset = 1;
3428
3429 SmallVector<int, 16> Idxs(NumElts);
3430 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
3431 for (unsigned i = 0; i != NumLaneElts; i += 2) {
3432 Idxs[i + l + 0] = i + l + Offset;
3433 Idxs[i + l + 1] = i + l + Offset;
3434 }
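 // For example, on <4 x float> movsldup produces Idxs = <0, 0, 2, 2>,
 // while movshdup (Offset = 1) produces <1, 1, 3, 3>.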
3435
3436 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3437
3438 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3439 CI->getArgOperand(1));
3440 } else if (IsX86 && (Name.starts_with("avx512.mask.punpckl") ||
3441 Name.starts_with("avx512.mask.unpckl."))) {
3442 Value *Op0 = CI->getArgOperand(0);
3443 Value *Op1 = CI->getArgOperand(1);
3444 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3445 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3446
3447 SmallVector<int, 64> Idxs(NumElts);
3448 for (int l = 0; l != NumElts; l += NumLaneElts)
3449 for (int i = 0; i != NumLaneElts; ++i)
3450 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
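 // For example, punpckldq on <4 x i32> interleaves the low halves of the
 // two sources: Idxs = <0, 4, 1, 5>.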
3451
3452 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3453
3454 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3455 CI->getArgOperand(2));
3456 } else if (IsX86 && (Name.starts_with("avx512.mask.punpckh") ||
3457 Name.starts_with("avx512.mask.unpckh."))) {
3458 Value *Op0 = CI->getArgOperand(0);
3459 Value *Op1 = CI->getArgOperand(1);
3460 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3461 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3462
3463 SmallVector<int, 64> Idxs(NumElts);
3464 for (int l = 0; l != NumElts; l += NumLaneElts)
3465 for (int i = 0; i != NumLaneElts; ++i)
3466 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
3467
3468 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3469
3470 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3471 CI->getArgOperand(2));
3472 } else if (IsX86 && (Name.starts_with("avx512.mask.and.") ||
3473 Name.starts_with("avx512.mask.pand."))) {
3474 VectorType *FTy = cast<VectorType>(CI->getType());
3475 VectorType *ITy = VectorType::getInteger(FTy);
3476 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3477 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3478 Rep = Builder.CreateBitCast(Rep, FTy);
3479 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3480 CI->getArgOperand(2));
3481 } else if (IsX86 && (Name.starts_with("avx512.mask.andn.") ||
3482 Name.starts_with("avx512.mask.pandn."))) {
3483 VectorType *FTy = cast<VectorType>(CI->getType());
3484 VectorType *ITy = VectorType::getInteger(FTy);
3485 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
3486 Rep = Builder.CreateAnd(Rep,
3487 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3488 Rep = Builder.CreateBitCast(Rep, FTy);
3489 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3490 CI->getArgOperand(2));
3491 } else if (IsX86 && (Name.starts_with("avx512.mask.or.") ||
3492 Name.starts_with("avx512.mask.por."))) {
3493 VectorType *FTy = cast<VectorType>(CI->getType());
3494 VectorType *ITy = VectorType::getInteger(FTy);
3495 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3496 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3497 Rep = Builder.CreateBitCast(Rep, FTy);
3498 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3499 CI->getArgOperand(2));
3500 } else if (IsX86 && (Name.starts_with("avx512.mask.xor.") ||
3501 Name.starts_with("avx512.mask.pxor."))) {
3502 VectorType *FTy = cast<VectorType>(CI->getType());
3503 VectorType *ITy = VectorType::getInteger(FTy);
3504 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3505 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3506 Rep = Builder.CreateBitCast(Rep, FTy);
3507 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3508 CI->getArgOperand(2));
3509 } else if (IsX86 && Name.starts_with("avx512.mask.padd.")) {
3510 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3511 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3512 CI->getArgOperand(2));
3513 } else if (IsX86 && Name.starts_with("avx512.mask.psub.")) {
3514 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3515 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3516 CI->getArgOperand(2));
3517 } else if (IsX86 && Name.starts_with("avx512.mask.pmull.")) {
3518 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3519 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3520 CI->getArgOperand(2));
3521 } else if (IsX86 && Name.starts_with("avx512.mask.add.p")) {
3522 if (Name.ends_with(".512")) {
3523 Intrinsic::ID IID;
3524 if (Name[17] == 's')
3525 IID = Intrinsic::x86_avx512_add_ps_512;
3526 else
3527 IID = Intrinsic::x86_avx512_add_pd_512;
3528
3529 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3530 { CI->getArgOperand(0), CI->getArgOperand(1),
3531 CI->getArgOperand(4) });
3532 } else {
3533 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3534 }
3535 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3536 CI->getArgOperand(2));
3537 } else if (IsX86 && Name.starts_with("avx512.mask.div.p")) {
3538 if (Name.ends_with(".512")) {
3539 Intrinsic::ID IID;
3540 if (Name[17] == 's')
3541 IID = Intrinsic::x86_avx512_div_ps_512;
3542 else
3543 IID = Intrinsic::x86_avx512_div_pd_512;
3544
3545 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3546 { CI->getArgOperand(0), CI->getArgOperand(1),
3547 CI->getArgOperand(4) });
3548 } else {
3549 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3550 }
3551 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3552 CI->getArgOperand(2));
3553 } else if (IsX86 && Name.starts_with("avx512.mask.mul.p")) {
3554 if (Name.ends_with(".512")) {
3555 Intrinsic::ID IID;
3556 if (Name[17] == 's')
3557 IID = Intrinsic::x86_avx512_mul_ps_512;
3558 else
3559 IID = Intrinsic::x86_avx512_mul_pd_512;
3560
3561 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3562 { CI->getArgOperand(0), CI->getArgOperand(1),
3563 CI->getArgOperand(4) });
3564 } else {
3565 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3566 }
3567 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3568 CI->getArgOperand(2));
3569 } else if (IsX86 && Name.starts_with("avx512.mask.sub.p")) {
3570 if (Name.ends_with(".512")) {
3571 Intrinsic::ID IID;
3572 if (Name[17] == 's')
3573 IID = Intrinsic::x86_avx512_sub_ps_512;
3574 else
3575 IID = Intrinsic::x86_avx512_sub_pd_512;
3576
3577 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3578 { CI->getArgOperand(0), CI->getArgOperand(1),
3579 CI->getArgOperand(4) });
3580 } else {
3581 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3582 }
3583 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3584 CI->getArgOperand(2));
3585 } else if (IsX86 && (Name.starts_with("avx512.mask.max.p") ||
3586 Name.starts_with("avx512.mask.min.p")) &&
3587 Name.drop_front(18) == ".512") {
3588 bool IsDouble = Name[17] == 'd';
3589 bool IsMin = Name[13] == 'i';
3590 static const Intrinsic::ID MinMaxTbl[2][2] = {
3591 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3592 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3593 };
3594 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3595
3596 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3597 { CI->getArgOperand(0), CI->getArgOperand(1),
3598 CI->getArgOperand(4) });
3599 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3600 CI->getArgOperand(2));
3601 } else if (IsX86 && Name.starts_with("avx512.mask.lzcnt.")) {
3602 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3603 Intrinsic::ctlz,
3604 CI->getType()),
3605 { CI->getArgOperand(0), Builder.getInt1(false) });
3606 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3607 CI->getArgOperand(1));
3608 } else if (IsX86 && Name.starts_with("avx512.mask.psll")) {
3609 bool IsImmediate = Name[16] == 'i' ||
3610 (Name.size() > 18 && Name[18] == 'i');
3611 bool IsVariable = Name[16] == 'v';
3612 char Size = Name[16] == '.' ? Name[17] :
3613 Name[17] == '.' ? Name[18] :
3614 Name[18] == '.' ? Name[19] :
3615 Name[20];
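 // The conditional chain above picks out the size character that follows
 // the "psll" stem, wherever the '.' separators fall; for example
 // avx512.mask.psll.d.128 -> Name[17], avx512.mask.pslli.d -> Name[18],
 // avx512.mask.psllv2.di -> Name[19], avx512.mask.psllv16.hi -> Name[20].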
3616
3617 Intrinsic::ID IID;
3618 if (IsVariable && Name[17] != '.') {
3619 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3620 IID = Intrinsic::x86_avx2_psllv_q;
3621 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3622 IID = Intrinsic::x86_avx2_psllv_q_256;
3623 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3624 IID = Intrinsic::x86_avx2_psllv_d;
3625 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3626 IID = Intrinsic::x86_avx2_psllv_d_256;
3627 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3628 IID = Intrinsic::x86_avx512_psllv_w_128;
3629 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3630 IID = Intrinsic::x86_avx512_psllv_w_256;
3631 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3632 IID = Intrinsic::x86_avx512_psllv_w_512;
3633 else
3634 llvm_unreachable("Unexpected size");
3635 } else if (Name.ends_with(".128")) {
3636 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3637 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3638 : Intrinsic::x86_sse2_psll_d;
3639 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3640 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3641 : Intrinsic::x86_sse2_psll_q;
3642 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3643 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3644 : Intrinsic::x86_sse2_psll_w;
3645 else
3646 llvm_unreachable("Unexpected size");
3647 } else if (Name.ends_with(".256")) {
3648 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3649 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3650 : Intrinsic::x86_avx2_psll_d;
3651 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3652 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3653 : Intrinsic::x86_avx2_psll_q;
3654 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3655 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3656 : Intrinsic::x86_avx2_psll_w;
3657 else
3658 llvm_unreachable("Unexpected size");
3659 } else {
3660 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3661 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3662 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3663 Intrinsic::x86_avx512_psll_d_512;
3664 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3665 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3666 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3667 Intrinsic::x86_avx512_psll_q_512;
3668 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3669 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3670 : Intrinsic::x86_avx512_psll_w_512;
3671 else
3672 llvm_unreachable("Unexpected size");
3673 }
3674
3675 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3676 } else if (IsX86 && Name.starts_with("avx512.mask.psrl")) {
3677 bool IsImmediate = Name[16] == 'i' ||
3678 (Name.size() > 18 && Name[18] == 'i');
3679 bool IsVariable = Name[16] == 'v';
3680 char Size = Name[16] == '.' ? Name[17] :
3681 Name[17] == '.' ? Name[18] :
3682 Name[18] == '.' ? Name[19] :
3683 Name[20];
3684
3685 Intrinsic::ID IID;
3686 if (IsVariable && Name[17] != '.') {
3687 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3688 IID = Intrinsic::x86_avx2_psrlv_q;
3689 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3690 IID = Intrinsic::x86_avx2_psrlv_q_256;
3691 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3692 IID = Intrinsic::x86_avx2_psrlv_d;
3693 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3694 IID = Intrinsic::x86_avx2_psrlv_d_256;
3695 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3696 IID = Intrinsic::x86_avx512_psrlv_w_128;
3697 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3698 IID = Intrinsic::x86_avx512_psrlv_w_256;
3699 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3700 IID = Intrinsic::x86_avx512_psrlv_w_512;
3701 else
3702 llvm_unreachable("Unexpected size");
3703 } else if (Name.ends_with(".128")) {
3704 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3705 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3706 : Intrinsic::x86_sse2_psrl_d;
3707 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3708 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3709 : Intrinsic::x86_sse2_psrl_q;
3710 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3711 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3712 : Intrinsic::x86_sse2_psrl_w;
3713 else
3714 llvm_unreachable("Unexpected size");
3715 } else if (Name.ends_with(".256")) {
3716 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3717 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3718 : Intrinsic::x86_avx2_psrl_d;
3719 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3720 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3721 : Intrinsic::x86_avx2_psrl_q;
3722 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3723 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3724 : Intrinsic::x86_avx2_psrl_w;
3725 else
3726 llvm_unreachable("Unexpected size");
3727 } else {
3728 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrlv.d.512
3729 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3730 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3731 Intrinsic::x86_avx512_psrl_d_512;
3732 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrlv.q.512
3733 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3734 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3735 Intrinsic::x86_avx512_psrl_q_512;
3736 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
3737 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3738 : Intrinsic::x86_avx512_psrl_w_512;
3739 else
3740 llvm_unreachable("Unexpected size");
3741 }
3742
3743 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3744 } else if (IsX86 && Name.starts_with("avx512.mask.psra")) {
3745 bool IsImmediate = Name[16] == 'i' ||
3746 (Name.size() > 18 && Name[18] == 'i');
3747 bool IsVariable = Name[16] == 'v';
3748 char Size = Name[16] == '.' ? Name[17] :
3749 Name[17] == '.' ? Name[18] :
3750 Name[18] == '.' ? Name[19] :
3751 Name[20];
3752
3753 Intrinsic::ID IID;
3754 if (IsVariable && Name[17] != '.') {
3755 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3756 IID = Intrinsic::x86_avx2_psrav_d;
3757 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3758 IID = Intrinsic::x86_avx2_psrav_d_256;
3759 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3760 IID = Intrinsic::x86_avx512_psrav_w_128;
3761 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3762 IID = Intrinsic::x86_avx512_psrav_w_256;
3763 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3764 IID = Intrinsic::x86_avx512_psrav_w_512;
3765 else
3766 llvm_unreachable("Unexpected size");
3767 } else if (Name.ends_with(".128")) {
3768 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3769 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3770 : Intrinsic::x86_sse2_psra_d;
3771 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3772 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3773 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3774 Intrinsic::x86_avx512_psra_q_128;
3775 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3776 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3777 : Intrinsic::x86_sse2_psra_w;
3778 else
3779 llvm_unreachable("Unexpected size");
3780 } else if (Name.ends_with(".256")) {
3781 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3782 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3783 : Intrinsic::x86_avx2_psra_d;
3784 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3785 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3786 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3787 Intrinsic::x86_avx512_psra_q_256;
3788 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3789 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3790 : Intrinsic::x86_avx2_psra_w;
3791 else
3792 llvm_unreachable("Unexpected size");
3793 } else {
3794 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3795 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3796 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3797 Intrinsic::x86_avx512_psra_d_512;
3798 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q, psrav.q.512
3799 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3800 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3801 Intrinsic::x86_avx512_psra_q_512;
3802 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3803 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3804 : Intrinsic::x86_avx512_psra_w_512;
3805 else
3806 llvm_unreachable("Unexpected size");
3807 }
3808
3809 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3810 } else if (IsX86 && Name.starts_with("avx512.mask.move.s")) {
3811 Rep = upgradeMaskedMove(Builder, *CI);
3812 } else if (IsX86 && Name.starts_with("avx512.cvtmask2")) {
3813 Rep = upgradeMaskToInt(Builder, *CI);
3814 } else if (IsX86 && Name.ends_with(".movntdqa")) {
3815 MDNode *Node = MDNode::get(
3816 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3817
3818 Value *Ptr = CI->getArgOperand(0);
3819
3820 // Convert the type of the pointer to a pointer to the stored type.
3821 Value *BC = Builder.CreateBitCast(
3822 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3823 LoadInst *LI = Builder.CreateAlignedLoad(
3824 CI->getType(), BC,
3825 Align(CI->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
3826 LI->setMetadata(LLVMContext::MD_nontemporal, Node);
3827 Rep = LI;
3828 } else if (IsX86 && (Name.starts_with("fma.vfmadd.") ||
3829 Name.starts_with("fma.vfmsub.") ||
3830 Name.starts_with("fma.vfnmadd.") ||
3831 Name.starts_with("fma.vfnmsub."))) {
3832 bool NegMul = Name[6] == 'n';
3833 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3834 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
3835
3836 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3837 CI->getArgOperand(2) };
3838
3839 if (IsScalar) {
3840 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3841 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3842 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3843 }
3844
3845 if (NegMul && !IsScalar)
3846 Ops[0] = Builder.CreateFNeg(Ops[0]);
3847 if (NegMul && IsScalar)
3848 Ops[1] = Builder.CreateFNeg(Ops[1]);
3849 if (NegAcc)
3850 Ops[2] = Builder.CreateFNeg(Ops[2]);
3851
3852 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3853 Intrinsic::fma,
3854 Ops[0]->getType()),
3855 Ops);
3856
3857 if (IsScalar)
3858 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3859 (uint64_t)0);
3860 } else if (IsX86 && Name.starts_with("fma4.vfmadd.s")) {
3861 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3862 CI->getArgOperand(2) };
3863
3864 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3865 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3866 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3867
3868 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3869 Intrinsic::fma,
3870 Ops[0]->getType()),
3871 Ops);
3872
3873 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3874 Rep, (uint64_t)0);
3875 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.s") ||
3876 Name.starts_with("avx512.maskz.vfmadd.s") ||
3877 Name.starts_with("avx512.mask3.vfmadd.s") ||
3878 Name.starts_with("avx512.mask3.vfmsub.s") ||
3879 Name.starts_with("avx512.mask3.vfnmsub.s"))) {
3880 bool IsMask3 = Name[11] == '3';
3881 bool IsMaskZ = Name[11] == 'z';
3882 // Drop the "avx512.mask." prefix to simplify the suffix checks below.
3883 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3884 bool NegMul = Name[2] == 'n';
3885 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3886
3887 Value *A = CI->getArgOperand(0);
3888 Value *B = CI->getArgOperand(1);
3889 Value *C = CI->getArgOperand(2);
3890
3891 if (NegMul && (IsMask3 || IsMaskZ))
3892 A = Builder.CreateFNeg(A);
3893 if (NegMul && !(IsMask3 || IsMaskZ))
3894 B = Builder.CreateFNeg(B);
3895 if (NegAcc)
3896 C = Builder.CreateFNeg(C);
3897
3898 A = Builder.CreateExtractElement(A, (uint64_t)0);
3899 B = Builder.CreateExtractElement(B, (uint64_t)0);
3900 C = Builder.CreateExtractElement(C, (uint64_t)0);
3901
3902 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3903 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3904 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3905
3906 Intrinsic::ID IID;
3907 if (Name.back() == 'd')
3908 IID = Intrinsic::x86_avx512_vfmadd_f64;
3909 else
3910 IID = Intrinsic::x86_avx512_vfmadd_f32;
3911 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3912 Rep = Builder.CreateCall(FMA, Ops);
3913 } else {
3914 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3915 Intrinsic::fma,
3916 A->getType());
3917 Rep = Builder.CreateCall(FMA, { A, B, C });
3918 }
3919
3920 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3921 IsMask3 ? C : A;
3922
3923 // For Mask3 with NegAcc, we need to create a new extractelement that
3924 // avoids the negation above.
3925 if (NegAcc && IsMask3)
3926 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3927 (uint64_t)0);
3928
3929 Rep = emitX86ScalarSelect(Builder, CI->getArgOperand(3), Rep, PassThru);
3930 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3931 Rep, (uint64_t)0);
3932 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.p") ||
3933 Name.starts_with("avx512.mask.vfnmadd.p") ||
3934 Name.starts_with("avx512.mask.vfnmsub.p") ||
3935 Name.starts_with("avx512.mask3.vfmadd.p") ||
3936 Name.starts_with("avx512.mask3.vfmsub.p") ||
3937 Name.starts_with("avx512.mask3.vfnmsub.p") ||
3938 Name.starts_with("avx512.maskz.vfmadd.p"))) {
3939 bool IsMask3 = Name[11] == '3';
3940 bool IsMaskZ = Name[11] == 'z';
3941 // Drop the "avx512.mask." prefix to simplify the suffix checks below.
3942 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3943 bool NegMul = Name[2] == 'n';
3944 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3945
3946 Value *A = CI->getArgOperand(0);
3947 Value *B = CI->getArgOperand(1);
3948 Value *C = CI->getArgOperand(2);
3949
3950 if (NegMul && (IsMask3 || IsMaskZ))
3951 A = Builder.CreateFNeg(A);
3952 if (NegMul && !(IsMask3 || IsMaskZ))
3953 B = Builder.CreateFNeg(B);
3954 if (NegAcc)
3955 C = Builder.CreateFNeg(C);
3956
3957 if (CI->arg_size() == 5 &&
3958 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3959 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3960 Intrinsic::ID IID;
3961 // Check the character before ".512" in the string.
3962 if (Name[Name.size()-5] == 's')
3963 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3964 else
3965 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3966
3967 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3968 { A, B, C, CI->getArgOperand(4) });
3969 } else {
3970 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3971 Intrinsic::fma,
3972 A->getType());
3973 Rep = Builder.CreateCall(FMA, { A, B, C });
3974 }
3975
3976 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3977 IsMask3 ? CI->getArgOperand(2) :
3978 CI->getArgOperand(0);
3979
3980 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3981 } else if (IsX86 && Name.starts_with("fma.vfmsubadd.p")) {
3982 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3983 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3984 Intrinsic::ID IID;
3985 if (VecWidth == 128 && EltWidth == 32)
3986 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3987 else if (VecWidth == 256 && EltWidth == 32)
3988 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3989 else if (VecWidth == 128 && EltWidth == 64)
3990 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3991 else if (VecWidth == 256 && EltWidth == 64)
3992 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3993 else
3994 llvm_unreachable("Unexpected intrinsic");
3995
3996 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3997 CI->getArgOperand(2) };
3998 Ops[2] = Builder.CreateFNeg(Ops[2]);
3999 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
4000 Ops);
4001 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmaddsub.p") ||
4002 Name.starts_with("avx512.mask3.vfmaddsub.p") ||
4003 Name.starts_with("avx512.maskz.vfmaddsub.p") ||
4004 Name.starts_with("avx512.mask3.vfmsubadd.p"))) {
4005 bool IsMask3 = Name[11] == '3';
4006 bool IsMaskZ = Name[11] == 'z';
4007 // Drop the "avx512.mask." prefix to simplify the suffix checks below.
4008 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
4009 bool IsSubAdd = Name[3] == 's';
4010 if (CI->arg_size() == 5) {
4011 Intrinsic::ID IID;
4012 // Check the character before ".512" in the string.
4013 if (Name[Name.size()-5] == 's')
4014 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
4015 else
4016 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
4017
4018 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4019 CI->getArgOperand(2), CI->getArgOperand(4) };
4020 if (IsSubAdd)
4021 Ops[2] = Builder.CreateFNeg(Ops[2]);
4022
4023 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
4024 Ops);
4025 } else {
4026 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4027
4028 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4029 CI->getArgOperand(2) };
4030
4031 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
4032 Ops[0]->getType());
4033 Value *Odd = Builder.CreateCall(FMA, Ops);
4034 Ops[2] = Builder.CreateFNeg(Ops[2]);
4035 Value *Even = Builder.CreateCall(FMA, Ops);
4036
4037 if (IsSubAdd)
4038 std::swap(Even, Odd);
4039
4040 SmallVector<int, 32> Idxs(NumElts);
4041 for (int i = 0; i != NumElts; ++i)
4042 Idxs[i] = i + (i % 2) * NumElts;
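 // For example, on <4 x float> this builds Idxs = <0, 5, 2, 7>: even
 // result lanes take Even (the FMA with the negated addend, i.e. a
 // subtract) and odd lanes take Odd (an add), matching fmaddsub.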
4043
4044 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
4045 }
4046
4047 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
4048 IsMask3 ? CI->getArgOperand(2) :
4049 CI->getArgOperand(0);
4050
4051 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4052 } else if (IsX86 && (Name.starts_with("avx512.mask.pternlog.") ||
4053 Name.starts_with("avx512.maskz.pternlog."))) {
4054 bool ZeroMask = Name[11] == 'z';
4055 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4056 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
4057 Intrinsic::ID IID;
4058 if (VecWidth == 128 && EltWidth == 32)
4059 IID = Intrinsic::x86_avx512_pternlog_d_128;
4060 else if (VecWidth == 256 && EltWidth == 32)
4061 IID = Intrinsic::x86_avx512_pternlog_d_256;
4062 else if (VecWidth == 512 && EltWidth == 32)
4063 IID = Intrinsic::x86_avx512_pternlog_d_512;
4064 else if (VecWidth == 128 && EltWidth == 64)
4065 IID = Intrinsic::x86_avx512_pternlog_q_128;
4066 else if (VecWidth == 256 && EltWidth == 64)
4067 IID = Intrinsic::x86_avx512_pternlog_q_256;
4068 else if (VecWidth == 512 && EltWidth == 64)
4069 IID = Intrinsic::x86_avx512_pternlog_q_512;
4070 else
4071 llvm_unreachable("Unexpected intrinsic");
4072
4073 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
4074 CI->getArgOperand(2), CI->getArgOperand(3) };
4075 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4076 Args);
4077 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4078 : CI->getArgOperand(0);
4079 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
4080 } else if (IsX86 && (Name.starts_with("avx512.mask.vpmadd52") ||
4081 Name.starts_with("avx512.maskz.vpmadd52"))) {
4082 bool ZeroMask = Name[11] == 'z';
4083 bool High = Name[20] == 'h' || Name[21] == 'h';
4084 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4085 Intrinsic::ID IID;
4086 if (VecWidth == 128 && !High)
4087 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
4088 else if (VecWidth == 256 && !High)
4089 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
4090 else if (VecWidth == 512 && !High)
4091 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
4092 else if (VecWidth == 128 && High)
4093 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
4094 else if (VecWidth == 256 && High)
4095 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
4096 else if (VecWidth == 512 && High)
4097 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
4098 else
4099 llvm_unreachable("Unexpected intrinsic");
4100
4101 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
4102 CI->getArgOperand(2) };
4103 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4104 Args);
4105 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4106 : CI->getArgOperand(0);
4107 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4108 } else if (IsX86 && (Name.starts_with("avx512.mask.vpermi2var.") ||
4109 Name.starts_with("avx512.mask.vpermt2var.") ||
4110 Name.starts_with("avx512.maskz.vpermt2var."))) {
4111 bool ZeroMask = Name[11] == 'z';
4112 bool IndexForm = Name[17] == 'i';
4113 Rep = upgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
4114 } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpbusd.") ||
4115 Name.starts_with("avx512.maskz.vpdpbusd.") ||
4116 Name.starts_with("avx512.mask.vpdpbusds.") ||
4117 Name.starts_with("avx512.maskz.vpdpbusds."))) {
4118 bool ZeroMask = Name[11] == 'z';
4119 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
4120 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4121 Intrinsic::ID IID;
4122 if (VecWidth == 128 && !IsSaturating)
4123 IID = Intrinsic::x86_avx512_vpdpbusd_128;
4124 else if (VecWidth == 256 && !IsSaturating)
4125 IID = Intrinsic::x86_avx512_vpdpbusd_256;
4126 else if (VecWidth == 512 && !IsSaturating)
4127 IID = Intrinsic::x86_avx512_vpdpbusd_512;
4128 else if (VecWidth == 128 && IsSaturating)
4129 IID = Intrinsic::x86_avx512_vpdpbusds_128;
4130 else if (VecWidth == 256 && IsSaturating)
4131 IID = Intrinsic::x86_avx512_vpdpbusds_256;
4132 else if (VecWidth == 512 && IsSaturating)
4133 IID = Intrinsic::x86_avx512_vpdpbusds_512;
4134 else
4135 llvm_unreachable("Unexpected intrinsic");
4136
4137 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4138 CI->getArgOperand(2) };
4139 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4140 Args);
4141 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4142 : CI->getArgOperand(0);
4143 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4144 } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpwssd.") ||
4145 Name.starts_with("avx512.maskz.vpdpwssd.") ||
4146 Name.starts_with("avx512.mask.vpdpwssds.") ||
4147 Name.starts_with("avx512.maskz.vpdpwssds."))) {
4148 bool ZeroMask = Name[11] == 'z';
4149 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
4150 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4151 Intrinsic::ID IID;
4152 if (VecWidth == 128 && !IsSaturating)
4153 IID = Intrinsic::x86_avx512_vpdpwssd_128;
4154 else if (VecWidth == 256 && !IsSaturating)
4155 IID = Intrinsic::x86_avx512_vpdpwssd_256;
4156 else if (VecWidth == 512 && !IsSaturating)
4157 IID = Intrinsic::x86_avx512_vpdpwssd_512;
4158 else if (VecWidth == 128 && IsSaturating)
4159 IID = Intrinsic::x86_avx512_vpdpwssds_128;
4160 else if (VecWidth == 256 && IsSaturating)
4161 IID = Intrinsic::x86_avx512_vpdpwssds_256;
4162 else if (VecWidth == 512 && IsSaturating)
4163 IID = Intrinsic::x86_avx512_vpdpwssds_512;
4164 else
4165 llvm_unreachable("Unexpected intrinsic");
4166
4167 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4168 CI->getArgOperand(2) };
4169 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4170 Args);
4171 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4172 : CI->getArgOperand(0);
4173 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4174 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
4175 Name == "addcarry.u32" || Name == "addcarry.u64" ||
4176 Name == "subborrow.u32" || Name == "subborrow.u64")) {
4177 Intrinsic::ID IID;
4178 if (Name[0] == 'a' && Name.back() == '2')
4179 IID = Intrinsic::x86_addcarry_32;
4180 else if (Name[0] == 'a' && Name.back() == '4')
4181 IID = Intrinsic::x86_addcarry_64;
4182 else if (Name[0] == 's' && Name.back() == '2')
4183 IID = Intrinsic::x86_subborrow_32;
4184 else if (Name[0] == 's' && Name.back() == '4')
4185 IID = Intrinsic::x86_subborrow_64;
4186 else
4187 llvm_unreachable("Unexpected intrinsic");
4188
4189 // Make a call with 3 operands.
4190 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4191 CI->getArgOperand(2)};
4192 Value *NewCall = Builder.CreateCall(
4193 Intrinsic::getDeclaration(CI->getModule(), IID),
4194 Args);
4195
4196 // Extract the second result and store it.
4197 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4198 // Cast the pointer to the right type.
4199 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
4200 llvm::PointerType::getUnqual(Data->getType()));
4201 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4202 // Replace the original call result with the first result of the new call.
4203 Value *CF = Builder.CreateExtractValue(NewCall, 0);
4204
4205 CI->replaceAllUsesWith(CF);
4206 Rep = nullptr;
4207 } else if (IsX86 && Name.starts_with("avx512.mask.") &&
4208 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
4209 // Rep will be updated by the call in the condition.
4210 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
4211 Value *Arg = CI->getArgOperand(0);
4212 Value *Neg = Builder.CreateNeg(Arg, "neg");
4213 Value *Cmp = Builder.CreateICmpSGE(
4214 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
4215 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
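 // The replacement expands to IR along these lines (illustrative):
 //   %neg = sub i32 0, %a
 //   %abs.cond = icmp sge i32 %a, 0
 //   %abs = select i1 %abs.cond, i32 %a, i32 %neg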
4216 } else if (IsNVVM && (Name.starts_with("atomic.load.add.f32.p") ||
4217 Name.starts_with("atomic.load.add.f64.p"))) {
4218 Value *Ptr = CI->getArgOperand(0);
4219 Value *Val = CI->getArgOperand(1);
4220 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
4221 AtomicOrdering::SequentiallyConsistent);
4222 } else if (IsNVVM && Name.consume_front("max.") &&
4223 (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
4224 Name == "ui" || Name == "ull")) {
4225 Value *Arg0 = CI->getArgOperand(0);
4226 Value *Arg1 = CI->getArgOperand(1);
4227 Value *Cmp = Name.starts_with("u")
4228 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
4229 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
4230 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
4231 } else if (IsNVVM && Name.consume_front("min.") &&
4232 (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
4233 Name == "ui" || Name == "ull")) {
4234 Value *Arg0 = CI->getArgOperand(0);
4235 Value *Arg1 = CI->getArgOperand(1);
4236 Value *Cmp = Name.starts_with("u")
4237 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
4238 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
4239 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
4240 } else if (IsNVVM && Name == "clz.ll") {
4241 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
4242 Value *Arg = CI->getArgOperand(0);
4243 Value *Ctlz = Builder.CreateCall(
4244 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
4245 {Arg->getType()}),
4246 {Arg, Builder.getFalse()}, "ctlz");
4247 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
4248 } else if (IsNVVM && Name == "popc.ll") {
4249 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
4250 // i64.
4251 Value *Arg = CI->getArgOperand(0);
4252 Value *Popc = Builder.CreateCall(
4253 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
4254 {Arg->getType()}),
4255 Arg, "ctpop");
4256 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
4257 } else if (IsNVVM) {
4258 if (Name == "h2f") {
4259 Rep =
4260 Builder.CreateCall(Intrinsic::getDeclaration(
4261 F->getParent(), Intrinsic::convert_from_fp16,
4262 {Builder.getFloatTy()}),
4263 CI->getArgOperand(0), "h2f");
4264 } else {
4265 Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
4266 if (IID != Intrinsic::not_intrinsic &&
4267 !F->getReturnType()->getScalarType()->isBFloatTy()) {
4268 rename(F);
4269 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
4270 SmallVector<Value *, 2> Args;
4271 for (size_t I = 0; I < NewFn->arg_size(); ++I) {
4272 Value *Arg = CI->getArgOperand(I);
4273 Type *OldType = Arg->getType();
4274 Type *NewType = NewFn->getArg(I)->getType();
4275 Args.push_back((OldType->isIntegerTy() &&
4276 NewType->getScalarType()->isBFloatTy())
4277 ? Builder.CreateBitCast(Arg, NewType)
4278 : Arg);
4279 }
4280 Rep = Builder.CreateCall(NewFn, Args);
4281 if (F->getReturnType()->isIntegerTy())
4282 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
4283 }
4284 }
4285 } else if (IsARM) {
4286 Rep = upgradeARMIntrinsicCall(Name, CI, F, Builder);
4287 } else if (IsAMDGCN) {
4288 Rep = upgradeAMDGCNIntrinsicCall(Name, CI, F, Builder);
4289 } else if (IsDbg) {
4290 // We might have decided we don't want the new format after all between
4291 // first requesting the upgrade and now; skip the conversion if that is
4292 // the case, and check here to see if the intrinsic needs to be upgraded
4293 // normally.
4294 if (!CI->getModule()->IsNewDbgInfoFormat) {
4295 bool NeedsUpgrade =
4296 upgradeIntrinsicFunction1(CI->getCalledFunction(), NewFn, false);
4297 if (!NeedsUpgrade)
4298 return;
4299 FallthroughToDefaultUpgrade = true;
4300 } else {
4301 upgradeDbgIntrinsicToDbgRecord(Name, CI);
4302 }
4303 } else {
4304 llvm_unreachable("Unknown function for CallBase upgrade.");
4305 }
4306
4307 if (!FallthroughToDefaultUpgrade) {
4308 if (Rep)
4309 CI->replaceAllUsesWith(Rep);
4310 CI->eraseFromParent();
4311 return;
4312 }
4313 }
4314
4315 const auto &DefaultCase = [&]() -> void {
4316 if (CI->getFunctionType() == NewFn->getFunctionType()) {
4317 // Handle generic mangling change.
4318 assert(
4319 (CI->getCalledFunction()->getName() != NewFn->getName()) &&
4320 "Unknown function for CallBase upgrade and isn't just a name change");
4321 CI->setCalledFunction(NewFn);
4322 return;
4323 }
4324
4325 // This must be an upgrade from a named to a literal struct.
4326 if (auto *OldST = dyn_cast<StructType>(CI->getType())) {
4327 assert(OldST != NewFn->getReturnType() &&
4328 "Return type must have changed");
4329 assert(OldST->getNumElements() ==
4330 cast<StructType>(NewFn->getReturnType())->getNumElements() &&
4331 "Must have same number of elements");
4332
4333 SmallVector<Value *> Args(CI->args());
4334 Value *NewCI = Builder.CreateCall(NewFn, Args);
4335 Value *Res = PoisonValue::get(OldST);
4336 for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
4337 Value *Elem = Builder.CreateExtractValue(NewCI, Idx);
4338 Res = Builder.CreateInsertValue(Res, Elem, Idx);
4339 }
4340 CI->replaceAllUsesWith(Res);
4341 CI->eraseFromParent();
4342 return;
4343 }
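// Sketch of the named-to-literal struct upgrade above (the type and callee
// names are hypothetical):
//   %r = call %pair @f(...)            ; %pair = type { i32, i32 }
// is re-emitted against the new literal { i32, i32 } return type, and %r is
// rebuilt from poison with one extractvalue/insertvalue pair per element.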
4344
4345 // We're probably about to produce something invalid. Let the verifier catch
4346 // it instead of dying here.
4347 CI->setCalledOperand(
4348 ConstantExpr::getPointerCast(NewFn, CI->getCalledOperand()->getType()));
4349 return;
4350 };
4351 CallInst *NewCall = nullptr;
4352 switch (NewFn->getIntrinsicID()) {
4353 default: {
4354 DefaultCase();
4355 return;
4356 }
4357 case Intrinsic::arm_neon_vst1:
4358 case Intrinsic::arm_neon_vst2:
4359 case Intrinsic::arm_neon_vst3:
4360 case Intrinsic::arm_neon_vst4:
4361 case Intrinsic::arm_neon_vst2lane:
4362 case Intrinsic::arm_neon_vst3lane:
4363 case Intrinsic::arm_neon_vst4lane: {
4364 SmallVector<Value *, 4> Args(CI->args());
4365 NewCall = Builder.CreateCall(NewFn, Args);
4366 break;
4367 }
4368 case Intrinsic::aarch64_sve_bfmlalb_lane_v2:
4369 case Intrinsic::aarch64_sve_bfmlalt_lane_v2:
4370 case Intrinsic::aarch64_sve_bfdot_lane_v2: {
4371 LLVMContext &Ctx = F->getParent()->getContext();
4372 SmallVector<Value *, 4> Args(CI->args());
4373 Args[3] = ConstantInt::get(Type::getInt32Ty(Ctx),
4374 cast<ConstantInt>(Args[3])->getZExtValue());
4375 NewCall = Builder.CreateCall(NewFn, Args);
4376 break;
4377 }
4378 case Intrinsic::aarch64_sve_ld3_sret:
4379 case Intrinsic::aarch64_sve_ld4_sret:
4380 case Intrinsic::aarch64_sve_ld2_sret: {
4381 StringRef Name = F->getName();
4382 Name = Name.substr(5);
4383 unsigned N = StringSwitch<unsigned>(Name)
4384 .StartsWith("aarch64.sve.ld2", 2)
4385 .StartsWith("aarch64.sve.ld3", 3)
4386 .StartsWith("aarch64.sve.ld4", 4)
4387 .Default(0);
4388 ScalableVectorType *RetTy =
4389 dyn_cast<ScalableVectorType>(F->getReturnType());
4390 unsigned MinElts = RetTy->getMinNumElements() / N;
4391 SmallVector<Value *, 2> Args(CI->args());
4392 Value *NewLdCall = Builder.CreateCall(NewFn, Args);
4393 Value *Ret = llvm::PoisonValue::get(RetTy);
4394 for (unsigned I = 0; I < N; I++) {
4395 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4396 Value *SRet = Builder.CreateExtractValue(NewLdCall, I);
4397 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
4398 }
4399 NewCall = dyn_cast<CallInst>(Ret);
4400 break;
4401 }
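// Illustrative sketch (the overload suffixes are assumptions): an old
//   %v = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8(...)
// is rebuilt from the sret form as
//   %s = call { <vscale x 16 x i8>, <vscale x 16 x i8> }
//            @llvm.aarch64.sve.ld2.sret.nxv16i8(...)
// followed by two @llvm.vector.insert calls at element offsets 0 and 16.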
4402
4403 case Intrinsic::coro_end: {
4404 SmallVector<Value *, 3> Args(CI->args());
4405 Args.push_back(ConstantTokenNone::get(CI->getContext()));
4406 NewCall = Builder.CreateCall(NewFn, Args);
4407 break;
4408 }
4409
4410 case Intrinsic::vector_extract: {
4411 StringRef Name = F->getName();
4412 Name = Name.substr(5); // Strip llvm
4413 if (!Name.starts_with("aarch64.sve.tuple.get")) {
4414 DefaultCase();
4415 return;
4416 }
4417 ScalableVectorType *RetTy =
4418 dyn_cast<ScalableVectorType>(F->getReturnType());
4419 unsigned MinElts = RetTy->getMinNumElements();
4420 unsigned I = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
4421 Value *NewIdx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4422 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0), NewIdx});
4423 break;
4424 }
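// Illustrative sketch (overload suffixes are assumptions): the old
//   %e = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv16i32(
//            <vscale x 16 x i32> %tuple, i32 2)
// becomes a subvector extraction at element offset 2 * 4 = 8:
//   %e = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(
//            <vscale x 16 x i32> %tuple, i64 8)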
4425
4426 case Intrinsic::vector_insert: {
4427 StringRef Name = F->getName();
4428 Name = Name.substr(5);
4429 if (!Name.starts_with("aarch64.sve.tuple")) {
4430 DefaultCase();
4431 return;
4432 }
4433 if (Name.starts_with("aarch64.sve.tuple.set")) {
4434 unsigned I = dyn_cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
4435 ScalableVectorType *Ty =
4436 dyn_cast<ScalableVectorType>(CI->getArgOperand(2)->getType());
4437 Value *NewIdx =
4438 ConstantInt::get(Type::getInt64Ty(C), I * Ty->getMinNumElements());
4439 NewCall = Builder.CreateCall(
4440 NewFn, {CI->getArgOperand(0), CI->getArgOperand(2), NewIdx});
4441 break;
4442 }
4443 if (Name.starts_with("aarch64.sve.tuple.create")) {
4444 unsigned N = StringSwitch<unsigned>(Name)
4445 .StartsWith("aarch64.sve.tuple.create2", 2)
4446 .StartsWith("aarch64.sve.tuple.create3", 3)
4447 .StartsWith("aarch64.sve.tuple.create4", 4)
4448 .Default(0);
4449 assert(N > 1 && "Create is expected to be between 2-4");
4450 ScalableVectorType *RetTy =
4451 dyn_cast<ScalableVectorType>(F->getReturnType());
4452 Value *Ret = llvm::PoisonValue::get(RetTy);
4453 unsigned MinElts = RetTy->getMinNumElements() / N;
4454 for (unsigned I = 0; I < N; I++) {
4455 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4456 Value *V = CI->getArgOperand(I);
4457 Ret = Builder.CreateInsertVector(RetTy, Ret, V, Idx);
4458 }
4459 NewCall = dyn_cast<CallInst>(Ret);
4460 }
4461 break;
4462 }
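// Sketch for the tuple.create2 path above (illustrative types): two
// <vscale x 4 x i32> parts are inserted into a poison <vscale x 8 x i32>
// with @llvm.vector.insert at element offsets 0 and 4, reproducing the old
// tuple value as a single wide scalable vector.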
4463
4464 case Intrinsic::arm_neon_bfdot:
4465 case Intrinsic::arm_neon_bfmmla:
4466 case Intrinsic::arm_neon_bfmlalb:
4467 case Intrinsic::arm_neon_bfmlalt:
4468 case Intrinsic::aarch64_neon_bfdot:
4469 case Intrinsic::aarch64_neon_bfmmla:
4470 case Intrinsic::aarch64_neon_bfmlalb:
4471 case Intrinsic::aarch64_neon_bfmlalt: {
4472 SmallVector<Value *, 3> Args;
4473 assert(CI->arg_size() == 3 &&
4474 "Mismatch between function args and call args");
4475 size_t OperandWidth =
4476 CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
4477 assert((OperandWidth == 64 || OperandWidth == 128) &&
4478 "Unexpected operand width");
4479 Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
4480 auto Iter = CI->args().begin();
4481 Args.push_back(*Iter++);
4482 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4483 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4484 NewCall = Builder.CreateCall(NewFn, Args);
4485 break;
4486 }
4487
4488 case Intrinsic::bitreverse:
4489 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4490 break;
4491
4492 case Intrinsic::ctlz:
4493 case Intrinsic::cttz:
4494 assert(CI->arg_size() == 1 &&
4495 "Mismatch between function args and call args");
4496 NewCall =
4497 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
4498 break;
4499
4500 case Intrinsic::objectsize: {
4501 Value *NullIsUnknownSize =
4502 CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
4503 Value *Dynamic =
4504 CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
4505 NewCall = Builder.CreateCall(
4506 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
4507 break;
4508 }
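// Illustrative sketch: an old two-argument call such as
//   %s = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false)
// is padded with conservative defaults for the missing operands:
//   %s = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 false, i1 false)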
4509
4510 case Intrinsic::ctpop:
4511 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4512 break;
4513
4514 case Intrinsic::convert_from_fp16:
4515 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4516 break;
4517
4518 case Intrinsic::dbg_value: {
4519 StringRef Name = F->getName();
4520 Name = Name.substr(5); // Strip llvm.
4521 // Upgrade `dbg.addr` to `dbg.value` with `DW_OP_deref`.
4522 if (Name.starts_with("dbg.addr")) {
4523 DIExpression *Expr = cast<DIExpression>(
4524 cast<MetadataAsValue>(CI->getArgOperand(2))->getMetadata());
4525 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
4526 NewCall =
4527 Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
4528 MetadataAsValue::get(C, Expr)});
4529 break;
4530 }
4531
4532 // Upgrade from the old version that had an extra offset argument.
4533 assert(CI->arg_size() == 4);
4534 // Drop nonzero offsets instead of attempting to upgrade them.
4535 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
4536 if (Offset->isZeroValue()) {
4537 NewCall = Builder.CreateCall(
4538 NewFn,
4539 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
4540 break;
4541 }
4542 CI->eraseFromParent();
4543 return;
4544 }
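// Illustrative sketch of the dbg.addr path (metadata ids are hypothetical):
//   call void @llvm.dbg.addr(metadata ptr %p, metadata !1,
//                            metadata !DIExpression())
// becomes
//   call void @llvm.dbg.value(metadata ptr %p, metadata !1,
//                             metadata !DIExpression(DW_OP_deref))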
4545
4546 case Intrinsic::ptr_annotation:
4547 // Upgrade from versions that lacked the annotation attribute argument.
4548 if (CI->arg_size() != 4) {
4549 DefaultCase();
4550 return;
4551 }
4552
4553 // Create a new call with an added null annotation attribute argument.
4554 NewCall =
4555 Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
4556 CI->getArgOperand(2), CI->getArgOperand(3),
4557 Constant::getNullValue(Builder.getPtrTy())});
4558 NewCall->takeName(CI);
4559 CI->replaceAllUsesWith(NewCall);
4560 CI->eraseFromParent();
4561 return;
4562
4563 case Intrinsic::var_annotation:
4564 // Upgrade from versions that lacked the annotation attribute argument.
4565 if (CI->arg_size() != 4) {
4566 DefaultCase();
4567 return;
4568 }
4569 // Create a new call with an added null annotation attribute argument.
4570 NewCall =
4571 Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
4572 CI->getArgOperand(2), CI->getArgOperand(3),
4573 Constant::getNullValue(Builder.getPtrTy())});
4574 NewCall->takeName(CI);
4575 CI->replaceAllUsesWith(NewCall);
4576 CI->eraseFromParent();
4577 return;
4578
4579 case Intrinsic::riscv_aes32dsi:
4580 case Intrinsic::riscv_aes32dsmi:
4581 case Intrinsic::riscv_aes32esi:
4582 case Intrinsic::riscv_aes32esmi:
4583 case Intrinsic::riscv_sm4ks:
4584 case Intrinsic::riscv_sm4ed: {
4585 // The last argument to these intrinsics used to be i8 and changed to i32.
4586 // The type overload for sm4ks and sm4ed was removed.
4587 Value *Arg2 = CI->getArgOperand(2);
4588 if (Arg2->getType()->isIntegerTy(32) && !CI->getType()->isIntegerTy(64))
4589 return;
4590
4591 Value *Arg0 = CI->getArgOperand(0);
4592 Value *Arg1 = CI->getArgOperand(1);
4593 if (CI->getType()->isIntegerTy(64)) {
4594 Arg0 = Builder.CreateTrunc(Arg0, Builder.getInt32Ty());
4595 Arg1 = Builder.CreateTrunc(Arg1, Builder.getInt32Ty());
4596 }
4597
4598 Arg2 = ConstantInt::get(Type::getInt32Ty(C),
4599 cast<ConstantInt>(Arg2)->getZExtValue());
4600
4601 NewCall = Builder.CreateCall(NewFn, {Arg0, Arg1, Arg2});
4602 Value *Res = NewCall;
4603 if (Res->getType() != CI->getType())
4604 Res = Builder.CreateIntCast(NewCall, CI->getType(), /*isSigned*/ true);
4605 NewCall->takeName(CI);
4606 CI->replaceAllUsesWith(Res);
4607 CI->eraseFromParent();
4608 return;
4609 }
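// Illustrative RV64 sketch (the old overload naming is an assumption):
//   %r = call i64 @llvm.riscv.sm4ks.i64(i64 %a, i64 %b, i8 2)
// becomes
//   %a32 = trunc i64 %a to i32
//   %b32 = trunc i64 %b to i32
//   %r32 = call i32 @llvm.riscv.sm4ks(i32 %a32, i32 %b32, i32 2)
//   %r   = sext i32 %r32 to i64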
4610 case Intrinsic::riscv_sha256sig0:
4611 case Intrinsic::riscv_sha256sig1:
4612 case Intrinsic::riscv_sha256sum0:
4613 case Intrinsic::riscv_sha256sum1:
4614 case Intrinsic::riscv_sm3p0:
4615 case Intrinsic::riscv_sm3p1: {
4616 // The type overload for these intrinsics (i64 on RV64) was removed; they
4617 // now always operate on i32, so truncate and sign-extend around the call.
4618 if (!CI->getType()->isIntegerTy(64))
4619 return;
4620
4621 Value *Arg =
4622 Builder.CreateTrunc(CI->getArgOperand(0), Builder.getInt32Ty());
4623
4624 NewCall = Builder.CreateCall(NewFn, Arg);
4625 Value *Res =
4626 Builder.CreateIntCast(NewCall, CI->getType(), /*isSigned*/ true);
4627 NewCall->takeName(CI);
4628 CI->replaceAllUsesWith(Res);
4629 CI->eraseFromParent();
4630 return;
4631 }
4632
4633 case Intrinsic::x86_xop_vfrcz_ss:
4634 case Intrinsic::x86_xop_vfrcz_sd:
4635 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
4636 break;
4637
4638 case Intrinsic::x86_xop_vpermil2pd:
4639 case Intrinsic::x86_xop_vpermil2ps:
4640 case Intrinsic::x86_xop_vpermil2pd_256:
4641 case Intrinsic::x86_xop_vpermil2ps_256: {
4642 SmallVector<Value *, 4> Args(CI->args());
4643 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
4644 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
4645 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
4646 NewCall = Builder.CreateCall(NewFn, Args);
4647 break;
4648 }
4649
4650 case Intrinsic::x86_sse41_ptestc:
4651 case Intrinsic::x86_sse41_ptestz:
4652 case Intrinsic::x86_sse41_ptestnzc: {
4653 // The arguments for these intrinsics used to be v4f32, and changed
4654 // to v2i64. This is purely a nop, since those are bitwise intrinsics.
4655 // So, the only thing required is a bitcast for both arguments.
4656 // First, check the arguments have the old type.
4657 Value *Arg0 = CI->getArgOperand(0);
4658 if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
4659 return;
4660
4661 // Old intrinsic, add bitcasts
4662 Value *Arg1 = CI->getArgOperand(1);
4663
4664 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
4665
4666 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
4667 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
4668
4669 NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
4670 break;
4671 }
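// Illustrative sketch: an old
//   %r = call i32 @llvm.x86.sse41.ptestc(<4 x float> %a, <4 x float> %b)
// is rewritten against the v2i64 declaration as
//   %ba = bitcast <4 x float> %a to <2 x i64>
//   %bb = bitcast <4 x float> %b to <2 x i64>
//   %r = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %ba, <2 x i64> %bb)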
4672
4673 case Intrinsic::x86_rdtscp: {
4674 // This used to take 1 argument. If we have no arguments, it has already
4675 // been upgraded.
4676 if (CI->getNumOperands() == 0)
4677 return;
4678
4679 NewCall = Builder.CreateCall(NewFn);
4680 // Extract the second result and store it.
4681 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4682 // Cast the pointer to the right type.
4683 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
4684 llvm::PointerType::getUnqual(Data->getType()));
4685 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4686 // Replace the original call result with the first result of the new call.
4687 Value *TSC = Builder.CreateExtractValue(NewCall, 0);
4688
4689 NewCall->takeName(CI);
4690 CI->replaceAllUsesWith(TSC);
4691 CI->eraseFromParent();
4692 return;
4693 }
4694
4695 case Intrinsic::x86_sse41_insertps:
4696 case Intrinsic::x86_sse41_dppd:
4697 case Intrinsic::x86_sse41_dpps:
4698 case Intrinsic::x86_sse41_mpsadbw:
4699 case Intrinsic::x86_avx_dp_ps_256:
4700 case Intrinsic::x86_avx2_mpsadbw: {
4701 // Need to truncate the last argument from i32 to i8 -- this argument models
4702 // an inherently 8-bit immediate operand to these x86 instructions.
4703 SmallVector<Value *, 4> Args(CI->args());
4704
4705 // Replace the last argument with a trunc.
4706 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
4707 NewCall = Builder.CreateCall(NewFn, Args);
4708 break;
4709 }
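// Illustrative sketch: an old call with an i32 immediate such as
//   call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b,
//                                             i32 16)
// is re-emitted with the immediate truncated to i8 (the trunc constant-folds
// for literal immediates):
//   call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b,
//                                             i8 16)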
4710
4711 case Intrinsic::x86_avx512_mask_cmp_pd_128:
4712 case Intrinsic::x86_avx512_mask_cmp_pd_256:
4713 case Intrinsic::x86_avx512_mask_cmp_pd_512:
4714 case Intrinsic::x86_avx512_mask_cmp_ps_128:
4715 case Intrinsic::x86_avx512_mask_cmp_ps_256:
4716 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
4717 SmallVector<Value *, 4> Args(CI->args());
4718 unsigned NumElts =
4719 cast<FixedVectorType>(Args[0]->getType())->getNumElements();
4720 Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
4721
4722 NewCall = Builder.CreateCall(NewFn, Args);
4723 Value *Res = applyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
4724
4725 NewCall->takeName(CI);
4726 CI->replaceAllUsesWith(Res);
4727 CI->eraseFromParent();
4728 return;
4729 }
4730
4731 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128:
4732 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256:
4733 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512:
4734 case Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128:
4735 case Intrinsic::x86_avx512bf16_cvtneps2bf16_256:
4736 case Intrinsic::x86_avx512bf16_cvtneps2bf16_512: {
4737 SmallVector<Value *, 4> Args(CI->args());
4738 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4739 if (NewFn->getIntrinsicID() ==
4740 Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
4741 Args[1] = Builder.CreateBitCast(
4742 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4743
4744 NewCall = Builder.CreateCall(NewFn, Args);
4745 Value *Res = Builder.CreateBitCast(
4746 NewCall, FixedVectorType::get(Builder.getInt16Ty(), NumElts));
4747
4748 NewCall->takeName(CI);
4749 CI->replaceAllUsesWith(Res);
4750 CI->eraseFromParent();
4751 return;
4752 }
4753 case Intrinsic::x86_avx512bf16_dpbf16ps_128:
4754 case Intrinsic::x86_avx512bf16_dpbf16ps_256:
4755 case Intrinsic::x86_avx512bf16_dpbf16ps_512: {
4756 SmallVector<Value *, 4> Args(CI->args());
4757 unsigned NumElts =
4758 cast<FixedVectorType>(CI->getType())->getNumElements() * 2;
4759 Args[1] = Builder.CreateBitCast(
4760 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4761 Args[2] = Builder.CreateBitCast(
4762 Args[2], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4763
4764 NewCall = Builder.CreateCall(NewFn, Args);
4765 break;
4766 }
4767
4768 case Intrinsic::thread_pointer: {
4769 NewCall = Builder.CreateCall(NewFn, {});
4770 break;
4771 }
4772
4773 case Intrinsic::memcpy:
4774 case Intrinsic::memmove:
4775 case Intrinsic::memset: {
4776 // We have to make sure that the call signature is what we're expecting.
4777 // We only want to change the old signatures by removing the alignment arg:
4778 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
4779 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
4780 // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
4781 // -> @llvm.memset...(i8*, i8, i[32|64], i1)
4782 // Note: i8*'s in the above can be any pointer type
4783 if (CI->arg_size() != 5) {
4784 DefaultCase();
4785 return;
4786 }
4787 // Remove alignment argument (3), and add alignment attributes to the
4788 // dest/src pointers.
4789 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
4790 CI->getArgOperand(2), CI->getArgOperand(4)};
4791 NewCall = Builder.CreateCall(NewFn, Args);
4792 AttributeList OldAttrs = CI->getAttributes();
4793 AttributeList NewAttrs = AttributeList::get(
4794 C, OldAttrs.getFnAttrs(), OldAttrs.getRetAttrs(),
4795 {OldAttrs.getParamAttrs(0), OldAttrs.getParamAttrs(1),
4796 OldAttrs.getParamAttrs(2), OldAttrs.getParamAttrs(4)});
4797 NewCall->setAttributes(NewAttrs);
4798 auto *MemCI = cast<MemIntrinsic>(NewCall);
4799 // All mem intrinsics support dest alignment.
4800 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
4801 MemCI->setDestAlignment(Align->getMaybeAlignValue());
4802 // Memcpy/Memmove also support source alignment.
4803 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
4804 MTI->setSourceAlignment(Align->getMaybeAlignValue());
4805 break;
4806 }
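// Illustrative sketch: the old five-argument form
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 8,
//                                        i1 false)
// loses its alignment operand, which reappears as parameter attributes:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %d, i8* align 8 %s,
//                                        i64 %n, i1 false)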
4807 }
4808 assert(NewCall && "Should have either set this variable or returned through "
4809 "the default case");
4810 NewCall->takeName(CI);
4811 CI->replaceAllUsesWith(NewCall);
4812 CI->eraseFromParent();
4813}
4814
4815 void llvm::UpgradeCallsToIntrinsic(Function *F) {
4816 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
4817
4818 // Check if this function should be upgraded and get the replacement function
4819 // if there is one.
4820 Function *NewFn;
4821 if (UpgradeIntrinsicFunction(F, NewFn)) {
4822 // Replace all users of the old function with the new function or new
4823 // instructions. This is not a range loop because the call is deleted.
4824 for (User *U : make_early_inc_range(F->users()))
4825 if (CallBase *CB = dyn_cast<CallBase>(U))
4826 UpgradeIntrinsicCall(CB, NewFn);
4827
4828 // Remove old function, no longer used, from the module.
4829 F->eraseFromParent();
4830 }
4831}
4832
4833 MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
4834 const unsigned NumOperands = MD.getNumOperands();
4835 if (NumOperands == 0)
4836 return &MD; // Invalid, punt to a verifier error.
4837
4838 // Check if the tag uses struct-path aware TBAA format.
4839 if (isa<MDNode>(MD.getOperand(0)) && NumOperands >= 3)
4840 return &MD;
4841
4842 auto &Context = MD.getContext();
4843 if (NumOperands == 3) {
4844 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
4845 MDNode *ScalarType = MDNode::get(Context, Elts);
4846 // Create a MDNode <ScalarType, ScalarType, offset 0, const>
4847 Metadata *Elts2[] = {ScalarType, ScalarType,
4848 ConstantAsMetadata::get(
4849 Constant::getNullValue(Type::getInt64Ty(Context))),
4850 MD.getOperand(2)};
4851 return MDNode::get(Context, Elts2);
4852 }
4853 // Create a MDNode <MD, MD, offset 0>
4854 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
4855 Type::getInt64Ty(Context)))};
4856 return MDNode::get(Context, Elts);
4857}
4858
4859 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
4860 Instruction *&Temp) {
4861 if (Opc != Instruction::BitCast)
4862 return nullptr;
4863
4864 Temp = nullptr;
4865 Type *SrcTy = V->getType();
4866 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4867 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4868 LLVMContext &Context = V->getContext();
4869
4870 // We have no information about target data layout, so we assume that
4871 // the maximum pointer size is 64 bits.
4872 Type *MidTy = Type::getInt64Ty(Context);
4873 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
4874
4875 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
4876 }
4877
4878 return nullptr;
4879}
4880
4881 Constant *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
4882 if (Opc != Instruction::BitCast)
4883 return nullptr;
4884
4885 Type *SrcTy = C->getType();
4886 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4887 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4888 LLVMContext &Context = C->getContext();
4889
4890 // We have no information about target data layout, so we assume that
4891 // the maximum pointer size is 64 bits.
4892 Type *MidTy = Type::getInt64Ty(Context);
4893
4894 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
4895 DestTy);
4896 }
4897
4898 return nullptr;
4899}
4900
4901 /// Check the debug info version number; if it is out-dated, drop the debug
4902 /// info. Return true if the module is modified.
4903 bool llvm::UpgradeDebugInfo(Module &M) {
4904 if (DisableAutoUpgradeDebugInfo)
4905 return false;
4906
4907 unsigned Version = getDebugMetadataVersionFromModule(M);
4908 if (Version == DEBUG_METADATA_VERSION) {
4909 bool BrokenDebugInfo = false;
4910 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
4911 report_fatal_error("Broken module found, compilation aborted!");
4912 if (!BrokenDebugInfo)
4913 // Everything is ok.
4914 return false;
4915 else {
4916 // Diagnose malformed debug info.
4917 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
4918 M.getContext().diagnose(Diag);
4919 }
4920 }
4921 bool Modified = StripDebugInfo(M);
4922 if (Modified && Version != DEBUG_METADATA_VERSION) {
4923 // Diagnose a version mismatch.
4924 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
4925 M.getContext().diagnose(DiagVersion);
4926 }
4927 return Modified;
4928}
4929
4930 /// This checks for the objc retain/release marker, which should be upgraded.
4931 /// It returns true if the module is modified.
4932 static bool upgradeRetainReleaseMarker(Module &M) {
4933 bool Changed = false;
4934 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
4935 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
4936 if (ModRetainReleaseMarker) {
4937 MDNode *Op = ModRetainReleaseMarker->getOperand(0);
4938 if (Op) {
4939 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
4940 if (ID) {
4941 SmallVector<StringRef, 4> ValueComp;
4942 ID->getString().split(ValueComp, "#");
4943 if (ValueComp.size() == 2) {
4944 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
4945 ID = MDString::get(M.getContext(), NewValue);
4946 }
4947 M.addModuleFlag(Module::Error, MarkerKey, ID);
4948 M.eraseNamedMetadata(ModRetainReleaseMarker);
4949 Changed = true;
4950 }
4951 }
4952 }
4953 return Changed;
4954}
4955
4956 void llvm::UpgradeARCRuntime(Module &M) {
4957 // This lambda converts normal function calls to ARC runtime functions to
4958 // intrinsic calls.
4959 auto UpgradeToIntrinsic = [&](const char *OldFunc,
4960 llvm::Intrinsic::ID IntrinsicFunc) {
4961 Function *Fn = M.getFunction(OldFunc);
4962
4963 if (!Fn)
4964 return;
4965
4966 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
4967
4968 for (User *U : make_early_inc_range(Fn->users())) {
4969 CallInst *CI = dyn_cast<CallInst>(U);
4970 if (!CI || CI->getCalledFunction() != Fn)
4971 continue;
4972
4973 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
4974 FunctionType *NewFuncTy = NewFn->getFunctionType();
4975 SmallVector<Value *, 4> Args;
4976
4977 // Don't upgrade the intrinsic if it's not valid to bitcast the return
4978 // value to the return type of the old function.
4979 if (NewFuncTy->getReturnType() != CI->getType() &&
4980 !CastInst::castIsValid(Instruction::BitCast, CI,
4981 NewFuncTy->getReturnType()))
4982 continue;
4983
4984 bool InvalidCast = false;
4985
4986 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
4987 Value *Arg = CI->getArgOperand(I);
4988
4989 // Bitcast argument to the parameter type of the new function if it's
4990 // not a variadic argument.
4991 if (I < NewFuncTy->getNumParams()) {
4992 // Don't upgrade the intrinsic if it's not valid to bitcast the argument
4993 // to the parameter type of the new function.
4994 if (!CastInst::castIsValid(Instruction::BitCast, Arg,
4995 NewFuncTy->getParamType(I))) {
4996 InvalidCast = true;
4997 break;
4998 }
4999 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
5000 }
5001 Args.push_back(Arg);
5002 }
5003
5004 if (InvalidCast)
5005 continue;
5006
5007 // Create a call instruction that calls the new function.
5008 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
5009 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
5010 NewCall->takeName(CI);
5011
5012 // Bitcast the return value back to the type of the old call.
5013 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
5014
5015 if (!CI->use_empty())
5016 CI->replaceAllUsesWith(NewRetVal);
5017 CI->eraseFromParent();
5018 }
5019
5020 if (Fn->use_empty())
5021 Fn->eraseFromParent();
5022 };
5023
5024 // Unconditionally convert a call to "clang.arc.use" to a call to
5025 // "llvm.objc.clang.arc.use".
5026 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
5027
5028 // Upgrade the retain release marker. If there is no need to upgrade
5029 // the marker, that means either the module is already new enough to contain
5030 // new intrinsics or it is not ARC. There is no need to upgrade runtime calls.
5031 if (!upgradeRetainReleaseMarker(M))
5032 return;
5033
5034 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
5035 {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
5036 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
5037 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
5038 {"objc_autoreleaseReturnValue",
5039 llvm::Intrinsic::objc_autoreleaseReturnValue},
5040 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
5041 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
5042 {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
5043 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
5044 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
5045 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
5046 {"objc_release", llvm::Intrinsic::objc_release},
5047 {"objc_retain", llvm::Intrinsic::objc_retain},
5048 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
5049 {"objc_retainAutoreleaseReturnValue",
5050 llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
5051 {"objc_retainAutoreleasedReturnValue",
5052 llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
5053 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
5054 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
5055 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
5056 {"objc_unsafeClaimAutoreleasedReturnValue",
5057 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
5058 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
5059 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
5060 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
5061 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
5062 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
5063 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
5064 {"objc_arc_annotation_topdown_bbstart",
5065 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
5066 {"objc_arc_annotation_topdown_bbend",
5067 llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
5068 {"objc_arc_annotation_bottomup_bbstart",
5069 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
5070 {"objc_arc_annotation_bottomup_bbend",
5071 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
5072
5073 for (auto &I : RuntimeFuncs)
5074 UpgradeToIntrinsic(I.first, I.second);
5075}
5076
5077 bool llvm::UpgradeModuleFlags(Module &M) {
5078 NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
5079 if (!ModFlags)
5080 return false;
5081
5082 bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
5083 bool HasSwiftVersionFlag = false;
5084 uint8_t SwiftMajorVersion, SwiftMinorVersion;
5085 uint32_t SwiftABIVersion;
5086 auto Int8Ty = Type::getInt8Ty(M.getContext());
5087 auto Int32Ty = Type::getInt32Ty(M.getContext());
5088
5089 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
5090 MDNode *Op = ModFlags->getOperand(I);
5091 if (Op->getNumOperands() != 3)
5092 continue;
5093 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
5094 if (!ID)
5095 continue;
5096 auto SetBehavior = [&](Module::ModFlagBehavior B) {
5097 Metadata *Ops[3] = {ConstantAsMetadata::get(ConstantInt::get(
5098 Type::getInt32Ty(M.getContext()), B)),
5099 MDString::get(M.getContext(), ID->getString()),
5100 Op->getOperand(2)};
5101 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5102 Changed = true;
5103 };
5104
5105 if (ID->getString() == "Objective-C Image Info Version")
5106 HasObjCFlag = true;
5107 if (ID->getString() == "Objective-C Class Properties")
5108 HasClassProperties = true;
5109 // Upgrade PIC from Error/Max to Min.
5110 if (ID->getString() == "PIC Level") {
5111 if (auto *Behavior =
5112 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
5113 uint64_t V = Behavior->getLimitedValue();
5114 if (V == Module::Error || V == Module::Max)
5115 SetBehavior(Module::Min);
5116 }
5117 }
5118 // Upgrade "PIE Level" from Error to Max.
5119 if (ID->getString() == "PIE Level")
5120 if (auto *Behavior =
5121 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)))
5122 if (Behavior->getLimitedValue() == Module::Error)
5123 SetBehavior(Module::Max);
5124
5125 // Upgrade branch protection and return address signing module flags. The
5126 // module flag behavior for these fields was Error and is now Min.
5127 if (ID->getString() == "branch-target-enforcement" ||
5128 ID->getString().starts_with("sign-return-address")) {
5129 if (auto *Behavior =
5130 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
5131 if (Behavior->getLimitedValue() == Module::Error) {
5132 Type *Int32Ty = Type::getInt32Ty(M.getContext());
5133 Metadata *Ops[3] = {
5134 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Min)),
5135 Op->getOperand(1), Op->getOperand(2)};
5136 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5137 Changed = true;
5138 }
5139 }
5140 }
5141
5142 // Upgrade the Objective-C Image Info Section. Remove the whitespace in the
5143 // section name so that llvm-lto will not complain about mismatching
5144 // module flags that are functionally the same.
5145 if (ID->getString() == "Objective-C Image Info Section") {
5146 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
5147 SmallVector<StringRef, 4> ValueComp;
5148 Value->getString().split(ValueComp, " ");
5149 if (ValueComp.size() != 1) {
5150 std::string NewValue;
5151 for (auto &S : ValueComp)
5152 NewValue += S.str();
5153 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
5154 MDString::get(M.getContext(), NewValue)};
5155 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5156 Changed = true;
5157 }
5158 }
5159 }
5160
5161 // The IR upgrader turns an i32 "Objective-C Garbage Collection" flag into
5162 // an i8 value; if the higher bits are set, it adds a new Swift info flag.
5163 if (ID->getString() == "Objective-C Garbage Collection") {
5164 auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
5165 if (Md) {
5166 assert(Md->getValue() && "Expected non-empty metadata");
5167 auto Type = Md->getValue()->getType();
5168 if (Type == Int8Ty)
5169 continue;
5170 unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
5171 if ((Val & 0xff) != Val) {
5172 HasSwiftVersionFlag = true;
5173 SwiftABIVersion = (Val & 0xff00) >> 8;
5174 SwiftMajorVersion = (Val & 0xff000000) >> 24;
5175 SwiftMinorVersion = (Val & 0xff0000) >> 16;
5176 }
5177 Metadata *Ops[3] = {
5178 Op->getOperand(0),
5179 Op->getOperand(1),
5180 ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
5181 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5182 Changed = true;
5183 }
5184 }
5185
5186 if (ID->getString() == "amdgpu_code_object_version") {
5187 Metadata *Ops[3] = {
5188 Op->getOperand(0),
5189 MDString::get(M.getContext(), "amdhsa_code_object_version"),
5190 Op->getOperand(2)};
5191 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5192 Changed = true;
5193 }
5194 }
5195
5196 // "Objective-C Class Properties" is recently added for Objective-C. We
5197 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
5198 // flag of value 0, so we can correclty downgrade this flag when trying to
5199 // link an ObjC bitcode without this module flag with an ObjC bitcode with
5200 // this module flag.
5201 if (HasObjCFlag && !HasClassProperties) {
5202 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
5203 (uint32_t)0);
5204 Changed = true;
5205 }
5206
5207 if (HasSwiftVersionFlag) {
5208 M.addModuleFlag(Module::Error, "Swift ABI Version",
5209 SwiftABIVersion);
5210 M.addModuleFlag(Module::Error, "Swift Major Version",
5211 ConstantInt::get(Int8Ty, SwiftMajorVersion));
5212 M.addModuleFlag(Module::Error, "Swift Minor Version",
5213 ConstantInt::get(Int8Ty, SwiftMinorVersion));
5214 Changed = true;
5215 }
5216
5217 return Changed;
5218}
5219
5220 void llvm::UpgradeSectionAttributes(Module &M) {
5221 auto TrimSpaces = [](StringRef Section) -> std::string {
5222 SmallVector<StringRef, 5> Components;
5223 Section.split(Components, ',');
5224
5225 SmallString<32> Buffer;
5226 raw_svector_ostream OS(Buffer);
5227
5228 for (auto Component : Components)
5229 OS << ',' << Component.trim();
5230
5231 return std::string(OS.str().substr(1));
5232 };
5233
5234 for (auto &GV : M.globals()) {
5235 if (!GV.hasSection())
5236 continue;
5237
5238 StringRef Section = GV.getSection();
5239
5240 if (!Section.starts_with("__DATA, __objc_catlist"))
5241 continue;
5242
5243 // __DATA, __objc_catlist, regular, no_dead_strip
5244 // __DATA,__objc_catlist,regular,no_dead_strip
5245 GV.setSection(TrimSpaces(Section));
5246 }
5247}
5248
5249namespace {
5250// Prior to LLVM 10.0, the strictfp attribute could be used on individual
5251// callsites within a function that did not also have the strictfp attribute.
5252// Since 10.0, if strict FP semantics are needed within a function, the
5253// function must have the strictfp attribute and all calls within the function
5254// must also have the strictfp attribute. This latter restriction is
5255// necessary to prevent unwanted libcall simplification when a function is
5256// being cloned (such as for inlining).
5257//
5258// The "dangling" strictfp attribute usage was only used to prevent constant
5259// folding and other libcall simplification. The nobuiltin attribute on the
5260// callsite has the same effect.
5261struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
5262 StrictFPUpgradeVisitor() = default;
5263
5264 void visitCallBase(CallBase &Call) {
5265 if (!Call.isStrictFP())
5266 return;
5267 if (isa<ConstrainedFPIntrinsic>(&Call))
5268 return;
5269 // If we get here, the caller doesn't have the strictfp attribute
5270 // but this callsite does. Replace the strictfp attribute with nobuiltin.
5271 Call.removeFnAttr(Attribute::StrictFP);
5272 Call.addFnAttr(Attribute::NoBuiltin);
5273 }
5274};
5275} // namespace
5276
5277 void llvm::UpgradeFunctionAttributes(Function &F) {
5278 // If a function definition doesn't have the strictfp attribute,
5279 // convert any callsite strictfp attributes to nobuiltin.
5280 if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
5281 StrictFPUpgradeVisitor SFPV;
5282 SFPV.visit(F);
5283 }
5284
5285 // Remove all incompatible attributes from the function.
5286 F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
5287 for (auto &Arg : F.args())
5288 Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
5289
5290 // Older versions of LLVM treated an "implicit-section-name" attribute
5291 // similarly to directly setting the section on a Function.
5292 if (Attribute A = F.getFnAttribute("implicit-section-name");
5293 A.isValid() && A.isStringAttribute()) {
5294 F.setSection(A.getValueAsString());
5295 F.removeFnAttr("implicit-section-name");
5296 }
5297}
5298
5299static bool isOldLoopArgument(Metadata *MD) {
5300 auto *T = dyn_cast_or_null<MDTuple>(MD);
5301 if (!T)
5302 return false;
5303 if (T->getNumOperands() < 1)
5304 return false;
5305 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
5306 if (!S)
5307 return false;
5308 return S->getString().starts_with("llvm.vectorizer.");
5309}
5310
5312 StringRef OldPrefix = "llvm.vectorizer.";
5313 assert(OldTag.starts_with(OldPrefix) && "Expected old prefix");
5314
5315 if (OldTag == "llvm.vectorizer.unroll")
5316 return MDString::get(C, "llvm.loop.interleave.count");
5317
5318 return MDString::get(
5319 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
5320 .str());
5321}
5322
5323 static Metadata *upgradeLoopArgument(Metadata *MD) {
5324 auto *T = dyn_cast_or_null<MDTuple>(MD);
5325 if (!T)
5326 return MD;
5327 if (T->getNumOperands() < 1)
5328 return MD;
5329 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
5330 if (!OldTag)
5331 return MD;
5332 if (!OldTag->getString().starts_with("llvm.vectorizer."))
5333 return MD;
5334
5335 // This has an old tag. Upgrade it.
5336 SmallVector<Metadata *, 8> Ops;
5337 Ops.reserve(T->getNumOperands());
5338 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
5339 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
5340 Ops.push_back(T->getOperand(I));
5341
5342 return MDTuple::get(T->getContext(), Ops);
5343}
5344
5345 MDNode *llvm::upgradeInstructionLoopAttachment(Instruction &I, MDNode &N) {
5346 auto *T = dyn_cast<MDTuple>(&N);
5347 if (!T)
5348 return &N;
5349
5350 if (none_of(T->operands(), isOldLoopArgument))
5351 return &N;
5352
5353 SmallVector<Metadata *, 8> Ops;
5354 Ops.reserve(T->getNumOperands());
5355 for (Metadata *MD : T->operands())
5356 Ops.push_back(upgradeLoopArgument(MD));
5357
5358 return MDTuple::get(T->getContext(), Ops);
5359}
5360
5361 std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
5362 Triple T(TT);
5363 // The only data layout upgrade needed for pre-GCN, SPIR, or SPIRV is setting
5364 // the address space of globals to 1. This does not apply to SPIRV Logical.
5365 if (((T.isAMDGPU() && !T.isAMDGCN()) ||
5366 (T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical()))) &&
5367 !DL.contains("-G") && !DL.starts_with("G")) {
5368 return DL.empty() ? std::string("G1") : (DL + "-G1").str();
5369 }
5370
5371 if (T.isRISCV64()) {
5372 // Make i32 a native type for 64-bit RISC-V.
5373 auto I = DL.find("-n64-");
5374 if (I != StringRef::npos)
5375 return (DL.take_front(I) + "-n32:64-" + DL.drop_front(I + 5)).str();
5376 return DL.str();
5377 }
5378
5379 std::string Res = DL.str();
5380 // AMDGCN data layout upgrades.
5381 if (T.isAMDGCN()) {
5382 // Define address spaces for constants.
5383 if (!DL.contains("-G") && !DL.starts_with("G"))
5384 Res.append(Res.empty() ? "G1" : "-G1");
5385
5386 // Add missing non-integral declarations.
5387 // This goes before adding new address spaces to prevent incoherent string
5388 // values.
5389 if (!DL.contains("-ni") && !DL.starts_with("ni"))
5390 Res.append("-ni:7:8:9");
5391 // Update ni:7 to ni:7:8:9.
5392 if (DL.ends_with("ni:7"))
5393 Res.append(":8:9");
5394 if (DL.ends_with("ni:7:8"))
5395 Res.append(":9");
5396
5397 // Add sizing for address spaces 7 and 8 (fat raw buffers and buffer
5398 // resources). An empty data layout has already been upgraded to G1 by now.
5399 if (!DL.contains("-p7") && !DL.starts_with("p7"))
5400 Res.append("-p7:160:256:256:32");
5401 if (!DL.contains("-p8") && !DL.starts_with("p8"))
5402 Res.append("-p8:128:128");
5403 if (!DL.contains("-p9") && !DL.starts_with("p9"))
5404 Res.append("-p9:192:256:256:32");
5405
5406 return Res;
5407 }
5408
5409 if (!T.isX86())
5410 return Res;
5411
5412 // If the datalayout matches the expected format, add pointer size address
5413 // spaces to the datalayout.
5414 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
5415 if (StringRef Ref = Res; !Ref.contains(AddrSpaces)) {
5416 SmallVector<StringRef, 4> Groups;
5417 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
5418 if (R.match(Res, &Groups))
5419 Res = (Groups[1] + AddrSpaces + Groups[3]).str();
5420 }
5421
5422 // i128 values need to be 16-byte-aligned. LLVM already called into libgcc
5423 // for i128 operations prior to this being reflected in the data layout, and
5424 // clang mostly produced LLVM IR that already aligned i128 to 16 byte
5425 // boundaries, so although this is a breaking change, the upgrade is expected
5426 // to fix more IR than it breaks.
5427 // Intel MCU is an exception and uses 4-byte alignment.
5428 if (!T.isOSIAMCU()) {
5429 std::string I128 = "-i128:128";
5430 if (StringRef Ref = Res; !Ref.contains(I128)) {
5431 SmallVector<StringRef, 4> Groups;
5432 Regex R("^(e(-[mpi][^-]*)*)((-[^mpi][^-]*)*)$");
5433 if (R.match(Res, &Groups))
5434 Res = (Groups[1] + I128 + Groups[3]).str();
5435 }
5436 }
5437
5438 // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
5439 // Raising the alignment is safe because Clang did not produce f80 values in
5440 // the MSVC environment before this upgrade was added.
5441 if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
5442 StringRef Ref = Res;
5443 auto I = Ref.find("-f80:32-");
5444 if (I != StringRef::npos)
5445 Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
5446 }
5447
5448 return Res;
5449}
5450
5451 void llvm::UpgradeAttributes(AttrBuilder &B) {
5452 StringRef FramePointer;
5453 Attribute A = B.getAttribute("no-frame-pointer-elim");
5454 if (A.isValid()) {
5455 // The value can be "true" or "false".
5456 FramePointer = A.getValueAsString() == "true" ? "all" : "none";
5457 B.removeAttribute("no-frame-pointer-elim");
5458 }
5459 if (B.contains("no-frame-pointer-elim-non-leaf")) {
5460 // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
5461 if (FramePointer != "all")
5462 FramePointer = "non-leaf";
5463 B.removeAttribute("no-frame-pointer-elim-non-leaf");
5464 }
5465 if (!FramePointer.empty())
5466 B.addAttribute("frame-pointer", FramePointer);
5467
5468 A = B.getAttribute("null-pointer-is-valid");
5469 if (A.isValid()) {
5470 // The value can be "true" or "false".
5471 bool NullPointerIsValid = A.getValueAsString() == "true";
5472 B.removeAttribute("null-pointer-is-valid");
5473 if (NullPointerIsValid)
5474 B.addAttribute(Attribute::NullPointerIsValid);
5475 }
5476}
5477
5478void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
5479 // clang.arc.attachedcall bundles are now required to have an operand.
5480 // If they don't, it's okay to drop them entirely: when there is an operand,
5481 // the "attachedcall" is meaningful and required, but without an operand,
5482 // it's just a marker NOP. Dropping it merely prevents an optimization.
5483 erase_if(Bundles, [&](OperandBundleDef &OBD) {
5484 return OBD.getTag() == "clang.arc.attachedcall" &&
5485 OBD.inputs().empty();
5486 });
5487}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
unsigned Intr
amdgpu AMDGPU Register Bank Select
static Value * upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI, bool ZeroMask, bool IndexForm)
static Metadata * upgradeLoopArgument(Metadata *MD)
static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn, bool CanUpgradeDebugIntrinsicsToRecords)
static Value * upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op, unsigned Shift)
static bool upgradeRetainReleaseMarker(Module &M)
This checks for objc retain release marker which should be upgraded.
static Value * upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm, bool IsSigned)
static Value * upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI)
static Value * upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI, bool IsRotateRight)
static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name)
static MDString * upgradeLoopTag(LLVMContext &C, StringRef OldTag)
static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:88
static Value * upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, Value *Op1, Value *Shift, Value *Passthru, Value *Mask, bool IsVALIGN)
static Value * upgradeAbs(IRBuilder<> &Builder, CallBase &CI)
static Value * emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0, Value *Op1)
static Value * upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI)
static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name, Function *&NewFn)
static Value * applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask)
static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name)
static Value * upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op, unsigned Shift)
static bool isOldLoopArgument(Metadata *MD)
static Value * upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F, IRBuilder<> &Builder)
static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:72
static Value * upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI, Function *F, IRBuilder<> &Builder)
static Value * upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr, Value *Passthru, Value *Mask, bool Aligned)
static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:99
static MDType * unwrapMAVOp(CallBase *CI, unsigned Op)
Helper to unwrap intrinsic call MetadataAsValue operands.
static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F, StringRef Name, Function *&NewFn)
static Value * getX86MaskVec(IRBuilder<> &Builder, Value *Mask, unsigned NumElts)
static Value * emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0, Value *Op1)
static Value * upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI, bool IsShiftRight, bool ZeroMask)
static void rename(GlobalValue *GV)
Definition: AutoUpgrade.cpp:52
static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:56
static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn)
static cl::opt< bool > DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info", cl::desc("Disable autoupgrade of debug info"))
static Value * upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI, unsigned CC, bool Signed)
static Value * upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI, Intrinsic::ID IID)
static Value * upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI, Intrinsic::ID IID)
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder, CallBase &CI, Value *&Rep)
static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI)
Convert debug intrinsic calls to non-instruction debug records.
static Value * upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned)
static Value * upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data, Value *Mask, bool Aligned)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
@ Default
Definition: DwarfDebug.cpp:87
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define R2(n)
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t High
IntegerType * Int32Ty
LLVMContext & Context
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static const X86InstrFMA3Group Groups[]
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:76
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Class to represent array types.
Definition: DerivedTypes.h:371
Type * getElementType() const
Definition: DerivedTypes.h:384
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
Definition: Instructions.h:881
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:760
@ FAdd
*p = old + v
Definition: Instructions.h:785
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:800
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:804
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1823
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1687
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1678
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1778
unsigned arg_size() const
Definition: InstrTypes.h:1685
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
Definition: InstrTypes.h:1781
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
static ConstantAggregateZero * get(Type *Ty)
Definition: Constants.cpp:1663
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1291
static ConstantAsMetadata * get(Constant *C)
Definition: Metadata.h:528
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2126
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2072
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2112
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:205
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:154
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1356
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1499
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
This class represents an Operation in the Expression.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Diagnostic information for debug metadata version reporting.
Diagnostic information for stripping invalid debug metadata.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
Class to represent function types.
Definition: DerivedTypes.h:103
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
Type * getReturnType() const
Definition: DerivedTypes.h:124
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:164
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:202
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:232
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Function.cpp:403
size_t arg_size() const
Definition: Function.h:851
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:207
Argument * getArg(unsigned i) const
Definition: Function.h:836
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
Definition: IRBuilder.h:461
Value * CreateFSub(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1560
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2472
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:511
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2523
Value * CreateFDiv(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1614
CallInst * CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, Value *Idx, const Twine &Name="")
Create a call to the vector.insert intrinsic.
Definition: IRBuilder.h:1045
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2094
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2460
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition: IRBuilder.h:539
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1807
Value * CreateFAdd(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1533
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2170
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1212
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2516
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Definition: IRBuilder.cpp:578
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2269
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1110
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2033
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:526
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
Definition: IRBuilder.h:476
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2081
IntegerType * getInt16Ty()
Fetch the type representing a 16-bit integer.
Definition: IRBuilder.h:521
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition: IRBuilder.h:1721
Value * CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2277
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1749
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2241
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2127
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1790
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1416
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2021
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2494
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1475
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to the Masked Store intrinsic.
Definition: IRBuilder.cpp:598
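A hedged sketch of how the two masked-memory builders above compose, assuming the usual llvm/IR headers, a positioned IRBuilder<> Builder, and hypothetical values Ptr (a pointer) and Mask (an <8 x i1> vector); this is an illustration, not the expansion of any specific intrinsic in this file:

  auto *VecTy = FixedVectorType::get(Builder.getInt32Ty(), 8);
  Value *PassThru = Constant::getNullValue(VecTy);  // value for masked-off lanes
  Value *Loaded =
      Builder.CreateMaskedLoad(VecTy, Ptr, Align(32), Mask, PassThru);
  Builder.CreateMaskedStore(Loaded, Ptr, Align(32), Mask);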
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1327
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition: IRBuilder.h:471
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2549
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Definition: IRBuilder.h:1854
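CreateAtomicRMW is how removed target-specific atomic intrinsics are typically rewritten as native atomicrmw instructions. A minimal sketch, assuming a positioned IRBuilder<> Builder and hypothetical operands Ptr (a pointer) and Val (a float):

  AtomicRMWInst *RMW = Builder.CreateAtomicRMW(
      AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
      AtomicOrdering::SequentiallyConsistent);  // SSID defaults to SyncScope::System
  RMW->setName("old.val");  // atomicrmw yields the previous memory contents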
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2007
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1497
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:569
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2253
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2196
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1826
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2412
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1456
Value * CreateFPExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2110
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1519
Value * CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2261
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2351
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1587
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1730
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:516
Type * getBFloatTy()
Fetch the type representing a 16-bit brain floating point value.
Definition: IRBuilder.h:549
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2666
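Most Create* entries above are used in one recurring shape: point a builder at the deprecated call, emit the replacement expression, then splice it in. A minimal sketch, assuming CallBase *CI is the old call; the CreateAdd stands in for whatever the real expansion would be:

  IRBuilder<> Builder(CI->getContext());
  Builder.SetInsertPoint(CI);  // new instructions land right before the old call
  Value *Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
  CI->replaceAllUsesWith(Rep);
  CI->eraseFromParent();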
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
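InstVisitor's CRTP pattern, with visitCallBase as the hook, is the standard way to touch every call site in a function. A sketch of a minimal visitor; the CallCounter name is made up:

  struct CallCounter : InstVisitor<CallCounter> {
    unsigned NumCalls = 0;
    void visitCallBase(CallBase &CB) { ++NumCalls; }  // call, invoke, and callbr
  };
  CallCounter Counter;
  Counter.visit(F);  // F is an assumed Function &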
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:83
const BasicBlock * getParent() const
Definition: Instruction.h:152
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1636
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Definition: DerivedTypes.h:72
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
LLVMContext & getContext() const
Definition: Metadata.h:1231
A single uniqued string.
Definition: Metadata.h:720
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:600
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1498
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
static MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:103
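MDString, MDTuple, and MetadataAsValue compose in a fixed order whenever fresh metadata has to be materialized. A small sketch, assuming LLVMContext &Ctx and a made-up tag string:

  MDString *Str = MDString::get(Ctx, "some.tag");  // uniqued string node
  MDNode *Node = MDTuple::get(Ctx, {Str});         // one-operand tuple
  Value *AsArg = MetadataAsValue::get(Ctx, Node);  // usable as a call operand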
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:115
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:136
@ Error
Emits an error if two values disagree; otherwise the resulting value is that of the operands.
Definition: Module.h:118
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:150
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:147
bool IsNewDbgInfoFormat
Is this Module using intrinsics to record the position of debugging information, or non-intrinsic records?
Definition: Module.h:219
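The ModFlagBehavior values above govern how two modules' flags merge at link time. A sketch of attaching flags under several behaviors, assuming Module &M; the keys are chosen for illustration:

  M.addModuleFlag(Module::Error, "wchar_size", 4);        // mismatch is a link error
  M.addModuleFlag(Module::Min, "PIC Level", 2);           // smaller value wins
  M.addModuleFlag(Module::Override, "frame-pointer", 2);  // this value always wins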
A tuple of MDNodes.
Definition: Metadata.h:1729
void setOperand(unsigned I, MDNode *New)
Definition: Metadata.cpp:1390
MDNode * getOperand(unsigned i) const
Definition: Metadata.cpp:1382
unsigned getNumOperands() const
Definition: Metadata.cpp:1378
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1447
ArrayRef< InputTy > inputs() const
Definition: InstrTypes.h:1462
StringRef getTag() const
Definition: InstrTypes.h:1470
Class to represent pointers.
Definition: DerivedTypes.h:646
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
Definition: DerivedTypes.h:662
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1827
bool match(StringRef String, SmallVectorImpl< StringRef > *Matches=nullptr, std::string *Error=nullptr) const
matches - Match the regex against a given String.
Definition: Regex.cpp:83
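Regex::match with a Matches out-parameter is the usual way to pick an old name apart into components. A sketch with a made-up pattern, assuming StringRef Name:

  Regex R("^llvm\\.foo\\.([a-z0-9]+)");  // not a pattern used in this file
  SmallVector<StringRef, 2> Groups;
  if (R.match(Name, &Groups)) {
    StringRef Suffix = Groups[1];  // Groups[0] is the entire match
    (void)Suffix;
  }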
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:586
uint64_t getMinNumElements() const
Get the minimum number of elements in this vector.
Definition: DerivedTypes.h:634
ArrayRef< int > getShuffleMask() const
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc).
Definition: SmallString.h:26
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:601
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
static constexpr size_t npos
Definition: StringRef.h:52
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & StartsWith(StringLiteral S, T Value)
Definition: StringSwitch.h:83
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
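StringSwitch's Case/StartsWith/Default chain is the dominant dispatch idiom for name matching. A minimal sketch mapping made-up name fragments to intrinsic IDs, assuming StringRef Name:

  Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                         .Case("old.ctpop", Intrinsic::ctpop)       // exact match
                         .StartsWith("old.mem", Intrinsic::memcpy)  // prefix match
                         .Default(Intrinsic::not_intrinsic);        // fall-through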
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:373
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:342
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
static Type * getBFloatTy(LLVMContext &C)
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
Definition: Type.h:146
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
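replaceAllUsesWith and takeName form the canonical epilogue of a value replacement: the new value inherits both the uses and the name of the one it supersedes. A sketch, with CI the old instruction and Rep its replacement:

  Rep->takeName(CI);            // preserve the original result name
  CI->replaceAllUsesWith(Rep);  // every user now points at Rep
  CI->eraseFromParent();        // safe once use_empty() holds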
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
self_iterator getIterator()
Definition: ilist_node.h:109
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:690
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default LLVM calling convention, compatible with C.
Definition: CallingConv.h:34
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Decode the IIT table descriptors for the specified intrinsic into the given array of IITDescriptors.
Definition: Function.cpp:1313
std::optional< Function * > remangleIntrinsicFunction(Function *F)
Definition: Function.cpp:1777
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:1027
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1469
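Intrinsic::getDeclaration is the final step once a new intrinsic ID has been chosen; overloaded intrinsics take their concrete types in Tys. A sketch, assuming Module *M:

  Type *I32 = Type::getInt32Ty(M->getContext());
  Function *Ctpop =
      Intrinsic::getDeclaration(M, Intrinsic::ctpop, {I32});  // overloaded on i32
  Function *Trap = Intrinsic::getDeclaration(M, Intrinsic::trap);  // not overloaded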
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
void UpgradeIntrinsicCall(CallBase *CB, Function *NewFn)
This is the complement to the above, replacing a specific call to an intrinsic function with a call to the specified new function.
void UpgradeSectionAttributes(Module &M)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
void UpgradeInlineAsmString(std::string *AsmStr)
Upgrade the comment in a call to inline asm that represents an ObjC retain/release marker.
bool isValidAtomicOrdering(Int I)
bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn, bool CanUpgradeDebugIntrinsicsToRecords=true)
This is a more granular function that simply checks an intrinsic function for upgrading, and returns true if it requires upgrading.
MDNode * upgradeInstructionLoopAttachment(MDNode &N)
Upgrade the loop attachment metadata node.
void UpgradeAttributes(AttrBuilder &B)
Upgrade attributes that changed format or kind.
void UpgradeCallsToIntrinsic(Function *F)
This is an auto-upgrade hook for any old intrinsic function syntaxes which need to have both the function updated as well as all calls updated to the new function.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:656
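UpgradeIntrinsicFunction, UpgradeCallsToIntrinsic, and make_early_inc_range compose into the driver loop that IR and bitcode readers run; early-inc iteration matters because the upgrade may erase the function being visited. A hedged sketch of that loop, assuming Module &M:

  for (Function &F : make_early_inc_range(M)) {
    Function *NewFn = nullptr;
    if (UpgradeIntrinsicFunction(&F, NewFn))
      UpgradeCallsToIntrinsic(&F);  // rewrites each call site, then removes F
  }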
bool UpgradeModuleFlags(Module &M)
This checks for module flags which should be upgraded.
Op::Description Desc
void UpgradeOperandBundles(std::vector< OperandBundleDef > &OperandBundles)
Upgrade operand bundles (without knowing about their user instruction).
Constant * UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy)
This is an auto-upgrade for bitcast constant expression between pointers with different address spaces: the expression is replaced by a ptrtoint+inttoptr pair.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
std::string UpgradeDataLayoutString(StringRef DL, StringRef Triple)
Upgrade the datalayout string by adding a section for address space pointers.
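UpgradeDataLayoutString is purely string-to-string, so it slots in before the layout is applied to the module. A sketch, assuming Module &M and an illustrative triple:

  std::string DL = UpgradeDataLayoutString(M.getDataLayoutStr(),
                                           "x86_64-unknown-linux-gnu");
  M.setDataLayout(DL);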
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
GlobalVariable * UpgradeGlobalVariable(GlobalVariable *GV)
This checks for global variables which should be upgraded.
unsigned getDebugMetadataVersionFromModule(const Module &M)
Return Debug Info Metadata Version by checking module flags.
Definition: DebugInfo.cpp:928
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool StripDebugInfo(Module &M)
Strip debug info in the module if it exists.
Definition: DebugInfo.cpp:594
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Ref
The access may reference the value stored in memory.
Instruction * UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, Instruction *&Temp)
This is an auto-upgrade for bitcast between pointers with different address spaces: the instruction is replaced by a ptrtoint+inttoptr pair.
@ Dynamic
Denotes mode unknown at compile time.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C, pred), C.end());
Definition: STLExtras.h:2051
bool UpgradeDebugInfo(Module &M)
Check the debug info version number; if it is out-dated, drop the debug info.
void UpgradeFunctionAttributes(Function &F)
Correct any IR that is relying on old function attribute behavior.
MDNode * UpgradeTBAANode(MDNode &TBAANode)
If the given TBAA tag uses the scalar TBAA format, create a new node corresponding to the upgrade to the struct-path aware TBAA format.
void UpgradeARCRuntime(Module &M)
Convert calls to ARC runtime functions to intrinsic calls and upgrade the old retain release marker to new module flag format.
@ DEBUG_METADATA_VERSION
Definition: Metadata.h:52
bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7073
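verifyModule's BrokenDebugInfo out-parameter pairs naturally with StripDebugInfo when checking a freshly upgraded module. A sketch, assuming Module &M:

  bool BrokenDbg = false;
  if (verifyModule(M, &errs(), &BrokenDbg))
    report_fatal_error("module is broken after auto-upgrade");
  if (BrokenDbg)
    StripDebugInfo(M);  // drop invalid debug info rather than keep it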
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117