//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>

using namespace llvm;

static cl::opt<bool>
    DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info",
                                cl::desc("Disable autoupgrade of debug info"));

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
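// For example, an old declaration of "llvm.x86.sse41.ptestc" becomes
// "llvm.x86.sse41.ptestc.old", freeing the canonical name for the upgraded
// declaration; calls to the renamed function are rewritten later.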

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
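// Illustratively, this maps the old declaration
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// onto the current
//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)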
static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
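// For instance, the immediate operand of "llvm.x86.sse41.insertps" was
// declared as i32 in old bitcode and is i8 in the current definition; only
// the declared type changes, the constant's value is unaffected.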
static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declaration of fp compare intrinsics that change return type
// from scalar to vXi1 mask.
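// E.g. the old "llvm.x86.avx512.mask.cmp.ps.512" returned its result as a
// plain integer mask; the current intrinsic returns <16 x i1> instead.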
static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
                                      Function *&NewFn) {
  // Check if the return type is a vector.
  if (F->getReturnType()->isVectorTy())
    return false;

  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
                                    Function *&NewFn) {
  if (F->getReturnType()->getScalarType()->isBFloatTy())
    return false;

  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
                                      Function *&NewFn) {
  if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
    return false;

  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
  if (Name.consume_front("avx."))
    return (Name.starts_with("blend.p") ||        // Added in 3.7
            Name == "cvt.ps2.pd.256" ||           // Added in 3.9
            Name == "cvtdq2.pd.256" ||            // Added in 3.9
            Name == "cvtdq2.ps.256" ||            // Added in 7.0
            Name.starts_with("movnt.") ||         // Added in 3.2
            Name.starts_with("sqrt.p") ||         // Added in 7.0
            Name.starts_with("storeu.") ||        // Added in 3.9
            Name.starts_with("vbroadcast.s") ||   // Added in 3.5
            Name.starts_with("vbroadcastf128") || // Added in 4.0
            Name.starts_with("vextractf128.") ||  // Added in 3.7
            Name.starts_with("vinsertf128.") ||   // Added in 3.7
            Name.starts_with("vperm2f128.") ||    // Added in 6.0
            Name.starts_with("vpermil."));        // Added in 3.1

  if (Name.consume_front("avx2."))
    return (Name == "movntdqa" ||             // Added in 5.0
            Name.starts_with("pabs.") ||      // Added in 6.0
            Name.starts_with("padds.") ||     // Added in 8.0
            Name.starts_with("paddus.") ||    // Added in 8.0
            Name.starts_with("pblendd.") ||   // Added in 3.7
            Name == "pblendw" ||              // Added in 3.7
            Name.starts_with("pbroadcast") || // Added in 3.8
            Name.starts_with("pcmpeq.") ||    // Added in 3.1
            Name.starts_with("pcmpgt.") ||    // Added in 3.1
            Name.starts_with("pmax") ||       // Added in 3.9
            Name.starts_with("pmin") ||       // Added in 3.9
            Name.starts_with("pmovsx") ||     // Added in 3.9
            Name.starts_with("pmovzx") ||     // Added in 3.9
            Name == "pmul.dq" ||              // Added in 7.0
            Name == "pmulu.dq" ||             // Added in 7.0
            Name.starts_with("psll.dq") ||    // Added in 3.7
            Name.starts_with("psrl.dq") ||    // Added in 3.7
            Name.starts_with("psubs.") ||     // Added in 8.0
            Name.starts_with("psubus.") ||    // Added in 8.0
            Name.starts_with("vbroadcast") || // Added in 3.8
            Name == "vbroadcasti128" ||       // Added in 3.7
            Name == "vextracti128" ||         // Added in 3.7
            Name == "vinserti128" ||          // Added in 3.7
            Name == "vperm2i128");            // Added in 6.0

  if (Name.consume_front("avx512.")) {
    if (Name.consume_front("mask."))
      // 'avx512.mask.*'
      return (Name.starts_with("add.p") ||       // Added in 7.0. 128/256 in 4.0
              Name.starts_with("and.") ||        // Added in 3.9
              Name.starts_with("andn.") ||       // Added in 3.9
              Name.starts_with("broadcast.s") || // Added in 3.9
              Name.starts_with("broadcastf32x4.") || // Added in 6.0
              Name.starts_with("broadcastf32x8.") || // Added in 6.0
              Name.starts_with("broadcastf64x2.") || // Added in 6.0
              Name.starts_with("broadcastf64x4.") || // Added in 6.0
              Name.starts_with("broadcasti32x4.") || // Added in 6.0
              Name.starts_with("broadcasti32x8.") || // Added in 6.0
              Name.starts_with("broadcasti64x2.") || // Added in 6.0
              Name.starts_with("broadcasti64x4.") || // Added in 6.0
              Name.starts_with("cmp.b") ||           // Added in 5.0
              Name.starts_with("cmp.d") ||           // Added in 5.0
              Name.starts_with("cmp.q") ||           // Added in 5.0
              Name.starts_with("cmp.w") ||           // Added in 5.0
              Name.starts_with("compress.b") ||      // Added in 9.0
              Name.starts_with("compress.d") ||      // Added in 9.0
              Name.starts_with("compress.p") ||      // Added in 9.0
              Name.starts_with("compress.q") ||      // Added in 9.0
              Name.starts_with("compress.store.") || // Added in 7.0
              Name.starts_with("compress.w") ||      // Added in 9.0
              Name.starts_with("conflict.") ||       // Added in 9.0
              Name.starts_with("cvtdq2pd.") ||       // Added in 4.0
              Name.starts_with("cvtdq2ps.") || // Added in 7.0 updated 9.0
              Name == "cvtpd2dq.256" ||        // Added in 7.0
              Name == "cvtpd2ps.256" ||        // Added in 7.0
              Name == "cvtps2pd.128" ||        // Added in 7.0
              Name == "cvtps2pd.256" ||        // Added in 7.0
              Name.starts_with("cvtqq2pd.") || // Added in 7.0 updated 9.0
              Name == "cvtqq2ps.256" ||        // Added in 9.0
              Name == "cvtqq2ps.512" ||        // Added in 9.0
              Name == "cvttpd2dq.256" ||       // Added in 7.0
              Name == "cvttps2dq.128" ||       // Added in 7.0
              Name == "cvttps2dq.256" ||       // Added in 7.0
              Name.starts_with("cvtudq2pd.") ||  // Added in 4.0
              Name.starts_with("cvtudq2ps.") ||  // Added in 7.0 updated 9.0
              Name.starts_with("cvtuqq2pd.") ||  // Added in 7.0 updated 9.0
              Name == "cvtuqq2ps.256" ||         // Added in 9.0
              Name == "cvtuqq2ps.512" ||         // Added in 9.0
              Name.starts_with("dbpsadbw.") ||   // Added in 7.0
              Name.starts_with("div.p") ||    // Added in 7.0. 128/256 in 4.0
              Name.starts_with("expand.b") || // Added in 9.0
              Name.starts_with("expand.d") || // Added in 9.0
              Name.starts_with("expand.load.") || // Added in 7.0
              Name.starts_with("expand.p") ||     // Added in 9.0
              Name.starts_with("expand.q") ||     // Added in 9.0
              Name.starts_with("expand.w") ||     // Added in 9.0
              Name.starts_with("fpclass.p") ||    // Added in 7.0
              Name.starts_with("insert") ||       // Added in 4.0
              Name.starts_with("load.") ||        // Added in 3.9
              Name.starts_with("loadu.") ||       // Added in 3.9
              Name.starts_with("lzcnt.") ||       // Added in 5.0
              Name.starts_with("max.p") ||     // Added in 7.0. 128/256 in 5.0
              Name.starts_with("min.p") ||     // Added in 7.0. 128/256 in 5.0
              Name.starts_with("movddup") ||   // Added in 3.9
              Name.starts_with("move.s") ||    // Added in 4.0
              Name.starts_with("movshdup") ||  // Added in 3.9
              Name.starts_with("movsldup") ||  // Added in 3.9
              Name.starts_with("mul.p") ||     // Added in 7.0. 128/256 in 4.0
              Name.starts_with("or.") ||       // Added in 3.9
              Name.starts_with("pabs.") ||     // Added in 6.0
              Name.starts_with("packssdw.") || // Added in 5.0
              Name.starts_with("packsswb.") || // Added in 5.0
              Name.starts_with("packusdw.") || // Added in 5.0
              Name.starts_with("packuswb.") || // Added in 5.0
              Name.starts_with("padd.") ||     // Added in 4.0
              Name.starts_with("padds.") ||    // Added in 8.0
              Name.starts_with("paddus.") ||   // Added in 8.0
              Name.starts_with("palignr.") ||  // Added in 3.9
              Name.starts_with("pand.") ||     // Added in 3.9
              Name.starts_with("pandn.") ||    // Added in 3.9
              Name.starts_with("pavg") ||      // Added in 6.0
              Name.starts_with("pbroadcast") || // Added in 6.0
              Name.starts_with("pcmpeq.") ||    // Added in 3.9
              Name.starts_with("pcmpgt.") ||    // Added in 3.9
              Name.starts_with("perm.df.") ||   // Added in 3.9
              Name.starts_with("perm.di.") ||   // Added in 3.9
              Name.starts_with("permvar.") ||   // Added in 7.0
              Name.starts_with("pmaddubs.w.") || // Added in 7.0
              Name.starts_with("pmaddw.d.") ||   // Added in 7.0
              Name.starts_with("pmax") ||        // Added in 4.0
              Name.starts_with("pmin") ||        // Added in 4.0
              Name == "pmov.qd.256" ||           // Added in 9.0
              Name == "pmov.qd.512" ||           // Added in 9.0
              Name == "pmov.wb.256" ||           // Added in 9.0
              Name == "pmov.wb.512" ||           // Added in 9.0
              Name.starts_with("pmovsx") ||      // Added in 4.0
              Name.starts_with("pmovzx") ||      // Added in 4.0
              Name.starts_with("pmul.dq.") ||    // Added in 4.0
              Name.starts_with("pmul.hr.sw.") || // Added in 7.0
              Name.starts_with("pmulh.w.") ||    // Added in 7.0
              Name.starts_with("pmulhu.w.") ||   // Added in 7.0
              Name.starts_with("pmull.") ||      // Added in 4.0
              Name.starts_with("pmultishift.qb.") || // Added in 8.0
              Name.starts_with("pmulu.dq.") ||       // Added in 4.0
              Name.starts_with("por.") ||            // Added in 3.9
              Name.starts_with("prol.") ||           // Added in 8.0
              Name.starts_with("prolv.") ||          // Added in 8.0
              Name.starts_with("pror.") ||           // Added in 8.0
              Name.starts_with("prorv.") ||          // Added in 8.0
              Name.starts_with("pshuf.b.") ||        // Added in 4.0
              Name.starts_with("pshuf.d.") ||        // Added in 3.9
              Name.starts_with("pshufh.w.") ||       // Added in 3.9
              Name.starts_with("pshufl.w.") ||       // Added in 3.9
              Name.starts_with("psll.d") ||          // Added in 4.0
              Name.starts_with("psll.q") ||          // Added in 4.0
              Name.starts_with("psll.w") ||          // Added in 4.0
              Name.starts_with("pslli") ||           // Added in 4.0
              Name.starts_with("psllv") ||           // Added in 4.0
              Name.starts_with("psra.d") ||          // Added in 4.0
              Name.starts_with("psra.q") ||          // Added in 4.0
              Name.starts_with("psra.w") ||          // Added in 4.0
              Name.starts_with("psrai") ||           // Added in 4.0
              Name.starts_with("psrav") ||           // Added in 4.0
              Name.starts_with("psrl.d") ||          // Added in 4.0
              Name.starts_with("psrl.q") ||          // Added in 4.0
              Name.starts_with("psrl.w") ||          // Added in 4.0
              Name.starts_with("psrli") ||           // Added in 4.0
              Name.starts_with("psrlv") ||           // Added in 4.0
              Name.starts_with("psub.") ||           // Added in 4.0
              Name.starts_with("psubs.") ||          // Added in 8.0
              Name.starts_with("psubus.") ||         // Added in 8.0
              Name.starts_with("pternlog.") ||       // Added in 7.0
              Name.starts_with("punpckh") ||         // Added in 3.9
              Name.starts_with("punpckl") ||         // Added in 3.9
              Name.starts_with("pxor.") ||           // Added in 3.9
              Name.starts_with("shuf.f") ||          // Added in 6.0
              Name.starts_with("shuf.i") ||          // Added in 6.0
              Name.starts_with("shuf.p") ||          // Added in 4.0
              Name.starts_with("sqrt.p") ||          // Added in 7.0
              Name.starts_with("store.b.") ||        // Added in 3.9
              Name.starts_with("store.d.") ||        // Added in 3.9
              Name.starts_with("store.p") ||         // Added in 3.9
              Name.starts_with("store.q.") ||        // Added in 3.9
              Name.starts_with("store.w.") ||        // Added in 3.9
              Name == "store.ss" ||                  // Added in 7.0
              Name.starts_with("storeu.") ||         // Added in 3.9
              Name.starts_with("sub.p") ||     // Added in 7.0. 128/256 in 4.0
              Name.starts_with("ucmp.") ||     // Added in 5.0
              Name.starts_with("unpckh.") ||   // Added in 3.9
              Name.starts_with("unpckl.") ||   // Added in 3.9
              Name.starts_with("valign.") ||   // Added in 4.0
              Name == "vcvtph2ps.128" ||       // Added in 11.0
              Name == "vcvtph2ps.256" ||       // Added in 11.0
              Name.starts_with("vextract") ||  // Added in 4.0
              Name.starts_with("vfmadd.") ||   // Added in 7.0
              Name.starts_with("vfmaddsub.") || // Added in 7.0
              Name.starts_with("vfnmadd.") ||   // Added in 7.0
              Name.starts_with("vfnmsub.") ||   // Added in 7.0
              Name.starts_with("vpdpbusd.") ||  // Added in 7.0
              Name.starts_with("vpdpbusds.") || // Added in 7.0
              Name.starts_with("vpdpwssd.") ||  // Added in 7.0
              Name.starts_with("vpdpwssds.") || // Added in 7.0
              Name.starts_with("vpermi2var.") || // Added in 7.0
              Name.starts_with("vpermil.p") ||   // Added in 3.9
              Name.starts_with("vpermilvar.") || // Added in 4.0
              Name.starts_with("vpermt2var.") || // Added in 7.0
              Name.starts_with("vpmadd52") ||    // Added in 7.0
              Name.starts_with("vpshld.") ||     // Added in 7.0
              Name.starts_with("vpshldv.") ||    // Added in 8.0
              Name.starts_with("vpshrd.") ||     // Added in 7.0
              Name.starts_with("vpshrdv.") ||    // Added in 8.0
              Name.starts_with("vpshufbitqmb.") || // Added in 8.0
              Name.starts_with("xor."));           // Added in 3.9

    if (Name.consume_front("mask3."))
      // 'avx512.mask3.*'
      return (Name.starts_with("vfmadd.") ||    // Added in 7.0
              Name.starts_with("vfmaddsub.") || // Added in 7.0
              Name.starts_with("vfmsub.") ||    // Added in 7.0
              Name.starts_with("vfmsubadd.") || // Added in 7.0
              Name.starts_with("vfnmsub."));    // Added in 7.0

    if (Name.consume_front("maskz."))
      // 'avx512.maskz.*'
      return (Name.starts_with("pternlog.") ||   // Added in 7.0
              Name.starts_with("vfmadd.") ||     // Added in 7.0
              Name.starts_with("vfmaddsub.") ||  // Added in 7.0
              Name.starts_with("vpdpbusd.") ||   // Added in 7.0
              Name.starts_with("vpdpbusds.") ||  // Added in 7.0
              Name.starts_with("vpdpwssd.") ||   // Added in 7.0
              Name.starts_with("vpdpwssds.") ||  // Added in 7.0
              Name.starts_with("vpermt2var.") || // Added in 7.0
              Name.starts_with("vpmadd52") ||    // Added in 7.0
              Name.starts_with("vpshldv.") ||    // Added in 8.0
              Name.starts_with("vpshrdv."));     // Added in 8.0

    // 'avx512.*'
    return (Name == "movntdqa" ||               // Added in 5.0
            Name == "pmul.dq.512" ||            // Added in 7.0
            Name == "pmulu.dq.512" ||           // Added in 7.0
            Name.starts_with("broadcastm") ||   // Added in 6.0
            Name.starts_with("cmp.p") ||        // Added in 12.0
            Name.starts_with("cvtb2mask.") ||   // Added in 7.0
            Name.starts_with("cvtd2mask.") ||   // Added in 7.0
            Name.starts_with("cvtmask2") ||     // Added in 5.0
            Name.starts_with("cvtq2mask.") ||   // Added in 7.0
            Name == "cvtusi2sd" ||              // Added in 7.0
            Name.starts_with("cvtw2mask.") ||   // Added in 7.0
            Name == "kand.w" ||                 // Added in 7.0
            Name == "kandn.w" ||                // Added in 7.0
            Name == "knot.w" ||                 // Added in 7.0
            Name == "kor.w" ||                  // Added in 7.0
            Name == "kortestc.w" ||             // Added in 7.0
            Name == "kortestz.w" ||             // Added in 7.0
            Name.starts_with("kunpck") ||       // Added in 6.0
            Name == "kxnor.w" ||                // Added in 7.0
            Name == "kxor.w" ||                 // Added in 7.0
            Name.starts_with("padds.") ||       // Added in 8.0
            Name.starts_with("pbroadcast") ||   // Added in 3.9
            Name.starts_with("prol") ||         // Added in 8.0
            Name.starts_with("pror") ||         // Added in 8.0
            Name.starts_with("psll.dq") ||      // Added in 3.9
            Name.starts_with("psrl.dq") ||      // Added in 3.9
            Name.starts_with("psubs.") ||       // Added in 8.0
            Name.starts_with("ptestm") ||       // Added in 6.0
            Name.starts_with("ptestnm") ||      // Added in 6.0
            Name.starts_with("storent.") ||     // Added in 3.9
            Name.starts_with("vbroadcast.s") || // Added in 7.0
            Name.starts_with("vpshld.") ||      // Added in 8.0
            Name.starts_with("vpshrd."));       // Added in 8.0
  }

  if (Name.consume_front("fma."))
    return (Name.starts_with("vfmadd.") ||    // Added in 7.0
            Name.starts_with("vfmsub.") ||    // Added in 7.0
            Name.starts_with("vfmsubadd.") || // Added in 7.0
            Name.starts_with("vfnmadd.") ||   // Added in 7.0
            Name.starts_with("vfnmsub."));    // Added in 7.0

  if (Name.consume_front("fma4."))
    return Name.starts_with("vfmadd.s"); // Added in 7.0

  if (Name.consume_front("sse."))
    return (Name == "add.ss" ||            // Added in 4.0
            Name == "cvtsi2ss" ||          // Added in 7.0
            Name == "cvtsi642ss" ||        // Added in 7.0
            Name == "div.ss" ||            // Added in 4.0
            Name == "mul.ss" ||            // Added in 4.0
            Name.starts_with("sqrt.p") ||  // Added in 7.0
            Name == "sqrt.ss" ||           // Added in 7.0
            Name.starts_with("storeu.") || // Added in 3.9
            Name == "sub.ss");             // Added in 4.0

  if (Name.consume_front("sse2."))
    return (Name == "add.sd" ||            // Added in 4.0
            Name == "cvtdq2pd" ||          // Added in 3.9
            Name == "cvtdq2ps" ||          // Added in 7.0
            Name == "cvtps2pd" ||          // Added in 3.9
            Name == "cvtsi2sd" ||          // Added in 7.0
            Name == "cvtsi642sd" ||        // Added in 7.0
            Name == "cvtss2sd" ||          // Added in 7.0
            Name == "div.sd" ||            // Added in 4.0
            Name == "mul.sd" ||            // Added in 4.0
            Name.starts_with("padds.") ||  // Added in 8.0
            Name.starts_with("paddus.") || // Added in 8.0
            Name.starts_with("pcmpeq.") || // Added in 3.1
            Name.starts_with("pcmpgt.") || // Added in 3.1
            Name == "pmaxs.w" ||           // Added in 3.9
            Name == "pmaxu.b" ||           // Added in 3.9
            Name == "pmins.w" ||           // Added in 3.9
            Name == "pminu.b" ||           // Added in 3.9
            Name == "pmulu.dq" ||          // Added in 7.0
            Name.starts_with("pshuf") ||   // Added in 3.9
            Name.starts_with("psll.dq") || // Added in 3.7
            Name.starts_with("psrl.dq") || // Added in 3.7
            Name.starts_with("psubs.") ||  // Added in 8.0
            Name.starts_with("psubus.") || // Added in 8.0
            Name.starts_with("sqrt.p") ||  // Added in 7.0
            Name == "sqrt.sd" ||           // Added in 7.0
            Name == "storel.dq" ||         // Added in 3.9
            Name.starts_with("storeu.") || // Added in 3.9
            Name == "sub.sd");             // Added in 4.0

  if (Name.consume_front("sse41."))
    return (Name.starts_with("blendp") || // Added in 3.7
            Name == "movntdqa" ||         // Added in 5.0
            Name == "pblendw" ||          // Added in 3.7
            Name == "pmaxsb" ||           // Added in 3.9
            Name == "pmaxsd" ||           // Added in 3.9
            Name == "pmaxud" ||           // Added in 3.9
            Name == "pmaxuw" ||           // Added in 3.9
            Name == "pminsb" ||           // Added in 3.9
            Name == "pminsd" ||           // Added in 3.9
            Name == "pminud" ||           // Added in 3.9
            Name == "pminuw" ||           // Added in 3.9
            Name.starts_with("pmovsx") || // Added in 3.8
            Name.starts_with("pmovzx") || // Added in 3.9
            Name == "pmuldq");            // Added in 7.0

  if (Name.consume_front("sse42."))
    return Name == "crc32.64.8"; // Added in 3.4

  if (Name.consume_front("sse4a."))
    return Name.starts_with("movnt."); // Added in 3.9

  if (Name.consume_front("ssse3."))
    return (Name == "pabs.b.128" || // Added in 6.0
            Name == "pabs.d.128" || // Added in 6.0
            Name == "pabs.w.128");  // Added in 6.0
470
471 if (Name.consume_front("xop."))
472 return (Name == "vpcmov" || // Added in 3.8
473 Name == "vpcmov.256" || // Added in 5.0
474 Name.starts_with("vpcom") || // Added in 3.2, Updated in 9.0
475 Name.starts_with("vprot")); // Added in 8.0
476
477 return (Name == "addcarry.u32" || // Added in 8.0
478 Name == "addcarry.u64" || // Added in 8.0
479 Name == "addcarryx.u32" || // Added in 8.0
480 Name == "addcarryx.u64" || // Added in 8.0
481 Name == "subborrow.u32" || // Added in 8.0
482 Name == "subborrow.u64" || // Added in 8.0
483 Name.starts_with("vcvtph2ps.")); // Added in 11.0
484}

static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
                                        Function *&NewFn) {
  // Only handle intrinsics that start with "x86.".
  if (!Name.consume_front("x86."))
    return false;

  if (shouldUpgradeX86Intrinsic(F, Name)) {
    NewFn = nullptr;
    return true;
  }

  if (Name == "rdtscp") { // Added in 8.0
    // If this intrinsic has 0 operands, it's the new version.
    if (F->getFunctionType()->getNumParams() == 0)
      return false;

    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_rdtscp);
    return true;
  }

  Intrinsic::ID ID;

  // SSE4.1 ptest functions may have an old signature.
  if (Name.consume_front("sse41.ptest")) { // Added in 3.2
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("c", Intrinsic::x86_sse41_ptestc)
             .Case("z", Intrinsic::x86_sse41_ptestz)
             .Case("nzc", Intrinsic::x86_sse41_ptestnzc)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradePTESTIntrinsic(F, ID, NewFn);

    return false;
  }

  // Several blend and other instructions with masks used the wrong number of
  // bits.

  // Added in 3.6
  ID = StringSwitch<Intrinsic::ID>(Name)
           .Case("sse41.insertps", Intrinsic::x86_sse41_insertps)
           .Case("sse41.dppd", Intrinsic::x86_sse41_dppd)
           .Case("sse41.dpps", Intrinsic::x86_sse41_dpps)
           .Case("sse41.mpsadbw", Intrinsic::x86_sse41_mpsadbw)
           .Case("avx.dp.ps.256", Intrinsic::x86_avx_dp_ps_256)
           .Case("avx2.mpsadbw", Intrinsic::x86_avx2_mpsadbw)
           .Default(Intrinsic::not_intrinsic);
  if (ID != Intrinsic::not_intrinsic)
    return upgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);

  if (Name.consume_front("avx512.mask.cmp.")) {
    // Added in 7.0
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("pd.128", Intrinsic::x86_avx512_mask_cmp_pd_128)
             .Case("pd.256", Intrinsic::x86_avx512_mask_cmp_pd_256)
             .Case("pd.512", Intrinsic::x86_avx512_mask_cmp_pd_512)
             .Case("ps.128", Intrinsic::x86_avx512_mask_cmp_ps_128)
             .Case("ps.256", Intrinsic::x86_avx512_mask_cmp_ps_256)
             .Case("ps.512", Intrinsic::x86_avx512_mask_cmp_ps_512)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradeX86MaskedFPCompare(F, ID, NewFn);
    return false; // No other 'x86.avx512.mask.cmp.*'.
  }

  if (Name.consume_front("avx512bf16.")) {
    // Added in 9.0
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("cvtne2ps2bf16.128",
                   Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128)
             .Case("cvtne2ps2bf16.256",
                   Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256)
             .Case("cvtne2ps2bf16.512",
                   Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512)
             .Case("mask.cvtneps2bf16.128",
                   Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
             .Case("cvtneps2bf16.256",
                   Intrinsic::x86_avx512bf16_cvtneps2bf16_256)
             .Case("cvtneps2bf16.512",
                   Intrinsic::x86_avx512bf16_cvtneps2bf16_512)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradeX86BF16Intrinsic(F, ID, NewFn);

    // Added in 9.0
    ID = StringSwitch<Intrinsic::ID>(Name)
             .Case("dpbf16ps.128", Intrinsic::x86_avx512bf16_dpbf16ps_128)
             .Case("dpbf16ps.256", Intrinsic::x86_avx512bf16_dpbf16ps_256)
             .Case("dpbf16ps.512", Intrinsic::x86_avx512bf16_dpbf16ps_512)
             .Default(Intrinsic::not_intrinsic);
    if (ID != Intrinsic::not_intrinsic)
      return upgradeX86BF16DPIntrinsic(F, ID, NewFn);
    return false; // No other 'x86.avx512bf16.*'.
  }

  if (Name.consume_front("xop.")) {
    ID = Intrinsic::not_intrinsic;
    if (Name.starts_with("vpermil2")) { // Added in 3.9
      // Upgrade any XOP PERMIL2 index operand still using a float/double
      // vector.
      auto Idx = F->getFunctionType()->getParamType(2);
      if (Idx->isFPOrFPVectorTy()) {
        unsigned IdxSize = Idx->getPrimitiveSizeInBits();
        unsigned EltSize = Idx->getScalarSizeInBits();
        if (EltSize == 64 && IdxSize == 128)
          ID = Intrinsic::x86_xop_vpermil2pd;
        else if (EltSize == 32 && IdxSize == 128)
          ID = Intrinsic::x86_xop_vpermil2ps;
        else if (EltSize == 64 && IdxSize == 256)
          ID = Intrinsic::x86_xop_vpermil2pd_256;
        else
          ID = Intrinsic::x86_xop_vpermil2ps_256;
      }
    } else if (F->arg_size() == 2)
      // frcz.ss/sd may need to have an argument dropped. Added in 3.2
      ID = StringSwitch<Intrinsic::ID>(Name)
               .Case("vfrcz.ss", Intrinsic::x86_xop_vfrcz_ss)
               .Case("vfrcz.sd", Intrinsic::x86_xop_vfrcz_sd)
               .Default(Intrinsic::not_intrinsic);

    if (ID != Intrinsic::not_intrinsic) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
      return true;
    }
    return false; // No other 'x86.xop.*'
  }

  if (Name == "seh.recoverfp") {
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
    return true;
  }

  return false;
}

// Upgrade ARM (IsArm) or Aarch64 (!IsArm) intrinsic fns. Return true iff so.
// IsArm: 'arm.*', !IsArm: 'aarch64.*'.
static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
                                                 StringRef Name,
                                                 Function *&NewFn) {
  if (Name.starts_with("rbit")) {
    // '(arm|aarch64).rbit'.
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                      F->arg_begin()->getType());
    return true;
  }

  if (Name == "thread.pointer") {
    // '(arm|aarch64).thread.pointer'.
    NewFn =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
    return true;
  }

  bool Neon = Name.consume_front("neon.");
  if (Neon) {
    // '(arm|aarch64).neon.*'.
    // Changed in 12.0: bfdot accept v4bf16 and v8bf16 instead of v8i8 and
    // v16i8 respectively.
    if (Name.consume_front("bfdot.")) {
      // '(arm|aarch64).neon.bfdot.*'.
      Intrinsic::ID ID =
          StringSwitch<Intrinsic::ID>(Name)
              .Cases("v2f32.v8i8", "v4f32.v16i8",
                     IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfdot
                           : (Intrinsic::ID)Intrinsic::aarch64_neon_bfdot)
              .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
        assert((OperandWidth == 64 || OperandWidth == 128) &&
               "Unexpected operand width");
        LLVMContext &Ctx = F->getParent()->getContext();
        std::array<Type *, 2> Tys{
            {F->getReturnType(),
             FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)}};
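        // OperandWidth / 16 is the bf16 element count, e.g. a 64-bit operand
        // gives v4bf16 and a 128-bit operand gives v8bf16.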
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }
      return false; // No other '(arm|aarch64).neon.bfdot.*'.
    }

    // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic
    // anymore and accept v8bf16 instead of v16i8.
    if (Name.consume_front("bfm")) {
      // '(arm|aarch64).neon.bfm*'.
      if (Name.consume_back(".v4f32.v16i8")) {
        // '(arm|aarch64).neon.bfm*.v4f32.v16i8'.
        Intrinsic::ID ID =
            StringSwitch<Intrinsic::ID>(Name)
                .Case("mla",
                      IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmmla
                            : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmmla)
                .Case("lalb",
                      IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmlalb
                            : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalb)
                .Case("lalt",
                      IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmlalt
                            : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalt)
                .Default(Intrinsic::not_intrinsic);
        if (ID != Intrinsic::not_intrinsic) {
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        return false; // No other '(arm|aarch64).neon.bfm*.v16i8'.
      }
      return false; // No other '(arm|aarch64).neon.bfm*'.
    }
    // Continue on to Aarch64 Neon or Arm Neon.
  }
  // Continue on to Arm or Aarch64.

  if (IsArm) {
    // 'arm.*'.
    if (Neon) {
      // 'arm.neon.*'.
      Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                             .StartsWith("vclz.", Intrinsic::ctlz)
                             .StartsWith("vcnt.", Intrinsic::ctpop)
                             .StartsWith("vqadds.", Intrinsic::sadd_sat)
                             .StartsWith("vqaddu.", Intrinsic::uadd_sat)
                             .StartsWith("vqsubs.", Intrinsic::ssub_sat)
                             .StartsWith("vqsubu.", Intrinsic::usub_sat)
                             .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
                                          F->arg_begin()->getType());
        return true;
      }

      if (Name.consume_front("vst")) {
        // 'arm.neon.vst*'.
        static const Regex vstRegex("^([1234]|[234]lane)\\.v[a-z0-9]*$");
        SmallVector<StringRef, 2> Groups;
        if (vstRegex.match(Name, &Groups)) {
          static const Intrinsic::ID StoreInts[] = {
              Intrinsic::arm_neon_vst1, Intrinsic::arm_neon_vst2,
              Intrinsic::arm_neon_vst3, Intrinsic::arm_neon_vst4};

          static const Intrinsic::ID StoreLaneInts[] = {
              Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
              Intrinsic::arm_neon_vst4lane};

          auto fArgs = F->getFunctionType()->params();
          Type *Tys[] = {fArgs[0], fArgs[1]};
          if (Groups[1].size() == 1)
            NewFn = Intrinsic::getDeclaration(F->getParent(),
                                              StoreInts[fArgs.size() - 3], Tys);
          else
            NewFn = Intrinsic::getDeclaration(
                F->getParent(), StoreLaneInts[fArgs.size() - 5], Tys);
          return true;
        }
        return false; // No other 'arm.neon.vst*'.
      }

      return false; // No other 'arm.neon.*'.
    }

    if (Name.consume_front("mve.")) {
      // 'arm.mve.*'.
      if (Name == "vctp64") {
        if (cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
          // A vctp64 returning a v4i1 is converted to return a v2i1. Rename
          // the function and deal with it below in UpgradeIntrinsicCall.
          rename(F);
          return true;
        }
        return false; // Not 'arm.mve.vctp64'.
      }

      // These too are changed to accept a v2i1 instead of the old v4i1.
      if (Name.consume_back(".v4i1")) {
        // 'arm.mve.*.v4i1'.
        if (Name.consume_back(".predicated.v2i64.v4i32"))
          // 'arm.mve.*.predicated.v2i64.v4i32.v4i1'
          return Name == "mull.int" || Name == "vqdmull";

        if (Name.consume_back(".v2i64")) {
          // 'arm.mve.*.v2i64.v4i1'
          bool IsGather = Name.consume_front("vldr.gather.");
          if (IsGather || Name.consume_front("vstr.scatter.")) {
            if (Name.consume_front("base.")) {
              // Optional 'wb.' prefix.
              Name.consume_front("wb.");
              // 'arm.mve.(vldr.gather|vstr.scatter).base.(wb.)?
              // predicated.v2i64.v2i64.v4i1'.
              return Name == "predicated.v2i64";
            }

            if (Name.consume_front("offset.predicated."))
              return Name == (IsGather ? "v2i64.p0i64" : "p0i64.v2i64") ||
                     Name == (IsGather ? "v2i64.p0" : "p0.v2i64");

            // No other 'arm.mve.(vldr.gather|vstr.scatter).*.v2i64.v4i1'.
            return false;
          }

          return false; // No other 'arm.mve.*.v2i64.v4i1'.
        }
        return false; // No other 'arm.mve.*.v4i1'.
      }
      return false; // No other 'arm.mve.*'.
    }

    if (Name.consume_front("cde.vcx")) {
      // 'arm.cde.vcx*'.
      if (Name.consume_back(".predicated.v2i64.v4i1"))
        // 'arm.cde.vcx*.predicated.v2i64.v4i1'.
        return Name == "1q" || Name == "1qa" || Name == "2q" || Name == "2qa" ||
               Name == "3q" || Name == "3qa";

      return false; // No other 'arm.cde.vcx*'.
    }
  } else {
    // 'aarch64.*'.
    if (Neon) {
      // 'aarch64.neon.*'.
      Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                             .StartsWith("frintn", Intrinsic::roundeven)
                             .StartsWith("rbit", Intrinsic::bitreverse)
                             .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
                                          F->arg_begin()->getType());
        return true;
      }

      if (Name.starts_with("addp")) {
        // 'aarch64.neon.addp*'.
        if (F->arg_size() != 2)
          return false; // Invalid IR.
        VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
        if (Ty && Ty->getElementType()->isFloatingPointTy()) {
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::aarch64_neon_faddp, Ty);
          return true;
        }
      }
      return false; // No other 'aarch64.neon.*'.
    }
    if (Name.consume_front("sve.")) {
      // 'aarch64.sve.*'.
      if (Name.consume_front("bf")) {
        if (Name.consume_back(".lane")) {
          // 'aarch64.sve.bf*.lane'.
          Intrinsic::ID ID =
              StringSwitch<Intrinsic::ID>(Name)
                  .Case("dot", Intrinsic::aarch64_sve_bfdot_lane_v2)
                  .Case("mlalb", Intrinsic::aarch64_sve_bfmlalb_lane_v2)
                  .Case("mlalt", Intrinsic::aarch64_sve_bfmlalt_lane_v2)
                  .Default(Intrinsic::not_intrinsic);
          if (ID != Intrinsic::not_intrinsic) {
            NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
            return true;
          }
          return false; // No other 'aarch64.sve.bf*.lane'.
        }
        return false; // No other 'aarch64.sve.bf*'.
      }

      if (Name.consume_front("addqv")) {
        // 'aarch64.sve.addqv'.
        if (!F->getReturnType()->isFPOrFPVectorTy())
          return false;

        auto Args = F->getFunctionType()->params();
        Type *Tys[] = {F->getReturnType(), Args[1]};
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::aarch64_sve_faddqv, Tys);
        return true;
      }

      if (Name.consume_front("ld")) {
        // 'aarch64.sve.ld*'.
        static const Regex LdRegex("^[234](.nxv[a-z0-9]+|$)");
        if (LdRegex.match(Name)) {
          Type *ScalarTy =
              cast<VectorType>(F->getReturnType())->getElementType();
          ElementCount EC =
              cast<VectorType>(F->arg_begin()->getType())->getElementCount();
          Type *Ty = VectorType::get(ScalarTy, EC);
          static const Intrinsic::ID LoadIDs[] = {
              Intrinsic::aarch64_sve_ld2_sret,
              Intrinsic::aarch64_sve_ld3_sret,
              Intrinsic::aarch64_sve_ld4_sret,
          };
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            LoadIDs[Name[0] - '2'], Ty);
          return true;
        }
        return false; // No other 'aarch64.sve.ld*'.
      }

      if (Name.consume_front("tuple.")) {
        // 'aarch64.sve.tuple.*'.
        if (Name.starts_with("get")) {
          // 'aarch64.sve.tuple.get*'.
          Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::vector_extract, Tys);
          return true;
        }

        if (Name.starts_with("set")) {
          // 'aarch64.sve.tuple.set*'.
          auto Args = F->getFunctionType()->params();
          Type *Tys[] = {Args[0], Args[2], Args[1]};
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::vector_insert, Tys);
          return true;
        }

        static const Regex CreateTupleRegex("^create[234](.nxv[a-z0-9]+|$)");
        if (CreateTupleRegex.match(Name)) {
          // 'aarch64.sve.tuple.create*'.
          auto Args = F->getFunctionType()->params();
          Type *Tys[] = {F->getReturnType(), Args[1]};
          NewFn = Intrinsic::getDeclaration(F->getParent(),
                                            Intrinsic::vector_insert, Tys);
          return true;
        }
        return false; // No other 'aarch64.sve.tuple.*'.
      }
      return false; // No other 'aarch64.sve.*'.
    }
  }
  return false; // No other 'arm.*', 'aarch64.*'.
}

static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
  if (Name.consume_front("abs."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_abs_bf16)
        .Case("bf16x2", Intrinsic::nvvm_abs_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("fma.rn."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_fma_rn_bf16)
        .Case("bf16x2", Intrinsic::nvvm_fma_rn_bf16x2)
        .Case("ftz.bf16", Intrinsic::nvvm_fma_rn_ftz_bf16)
        .Case("ftz.bf16x2", Intrinsic::nvvm_fma_rn_ftz_bf16x2)
        .Case("ftz.relu.bf16", Intrinsic::nvvm_fma_rn_ftz_relu_bf16)
        .Case("ftz.relu.bf16x2", Intrinsic::nvvm_fma_rn_ftz_relu_bf16x2)
        .Case("ftz.sat.bf16", Intrinsic::nvvm_fma_rn_ftz_sat_bf16)
        .Case("ftz.sat.bf16x2", Intrinsic::nvvm_fma_rn_ftz_sat_bf16x2)
        .Case("relu.bf16", Intrinsic::nvvm_fma_rn_relu_bf16)
        .Case("relu.bf16x2", Intrinsic::nvvm_fma_rn_relu_bf16x2)
        .Case("sat.bf16", Intrinsic::nvvm_fma_rn_sat_bf16)
        .Case("sat.bf16x2", Intrinsic::nvvm_fma_rn_sat_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("fmax."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_fmax_bf16)
        .Case("bf16x2", Intrinsic::nvvm_fmax_bf16x2)
        .Case("ftz.bf16", Intrinsic::nvvm_fmax_ftz_bf16)
        .Case("ftz.bf16x2", Intrinsic::nvvm_fmax_ftz_bf16x2)
        .Case("ftz.nan.bf16", Intrinsic::nvvm_fmax_ftz_nan_bf16)
        .Case("ftz.nan.bf16x2", Intrinsic::nvvm_fmax_ftz_nan_bf16x2)
        .Case("ftz.nan.xorsign.abs.bf16",
              Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16)
        .Case("ftz.nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16x2)
        .Case("ftz.xorsign.abs.bf16", Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16)
        .Case("ftz.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16x2)
        .Case("nan.bf16", Intrinsic::nvvm_fmax_nan_bf16)
        .Case("nan.bf16x2", Intrinsic::nvvm_fmax_nan_bf16x2)
        .Case("nan.xorsign.abs.bf16", Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16)
        .Case("nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16x2)
        .Case("xorsign.abs.bf16", Intrinsic::nvvm_fmax_xorsign_abs_bf16)
        .Case("xorsign.abs.bf16x2", Intrinsic::nvvm_fmax_xorsign_abs_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("fmin."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_fmin_bf16)
        .Case("bf16x2", Intrinsic::nvvm_fmin_bf16x2)
        .Case("ftz.bf16", Intrinsic::nvvm_fmin_ftz_bf16)
        .Case("ftz.bf16x2", Intrinsic::nvvm_fmin_ftz_bf16x2)
        .Case("ftz.nan.bf16", Intrinsic::nvvm_fmin_ftz_nan_bf16)
        .Case("ftz.nan.bf16x2", Intrinsic::nvvm_fmin_ftz_nan_bf16x2)
        .Case("ftz.nan.xorsign.abs.bf16",
              Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16)
        .Case("ftz.nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16x2)
        .Case("ftz.xorsign.abs.bf16", Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16)
        .Case("ftz.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16x2)
        .Case("nan.bf16", Intrinsic::nvvm_fmin_nan_bf16)
        .Case("nan.bf16x2", Intrinsic::nvvm_fmin_nan_bf16x2)
        .Case("nan.xorsign.abs.bf16", Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16)
        .Case("nan.xorsign.abs.bf16x2",
              Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16x2)
        .Case("xorsign.abs.bf16", Intrinsic::nvvm_fmin_xorsign_abs_bf16)
        .Case("xorsign.abs.bf16x2", Intrinsic::nvvm_fmin_xorsign_abs_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  if (Name.consume_front("neg."))
    return StringSwitch<Intrinsic::ID>(Name)
        .Case("bf16", Intrinsic::nvvm_neg_bf16)
        .Case("bf16x2", Intrinsic::nvvm_neg_bf16x2)
        .Default(Intrinsic::not_intrinsic);

  return Intrinsic::not_intrinsic;
}

static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
                                      bool CanUpgradeDebugIntrinsicsToRecords) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  StringRef Name = F->getName();

  // Quickly eliminate it, if it's not a candidate.
  if (!Name.consume_front("llvm.") || Name.empty())
    return false;

  switch (Name[0]) {
  default: break;
  case 'a': {
    bool IsArm = Name.consume_front("arm.");
    if (IsArm || Name.consume_front("aarch64.")) {
      if (upgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
        return true;
      break;
    }

    if (Name.consume_front("amdgcn.")) {
      if (Name == "alignbit") {
        // Target specific intrinsic became redundant
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
                                          {F->getReturnType()});
        return true;
      }

      if (Name.consume_front("atomic.")) {
        if (Name.starts_with("inc") || Name.starts_with("dec")) {
          // These were replaced with atomicrmw uinc_wrap and udec_wrap, so
          // there's no new declaration.
          NewFn = nullptr;
          return true;
        }
        break; // No other 'amdgcn.atomic.*'
      }

      if (Name.starts_with("ldexp.")) {
        // Target specific intrinsic became redundant
        NewFn = Intrinsic::getDeclaration(
            F->getParent(), Intrinsic::ldexp,
            {F->getReturnType(), F->getArg(1)->getType()});
        return true;
      }
      break; // No other 'amdgcn.*'
    }

    break;
  }
  case 'c': {
    if (F->arg_size() == 1) {
      Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                             .StartsWith("ctlz.", Intrinsic::ctlz)
                             .StartsWith("cttz.", Intrinsic::cttz)
                             .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
                                          F->arg_begin()->getType());
        return true;
      }
    }

    if (F->arg_size() == 2 && Name.equals("coro.end")) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::coro_end);
      return true;
    }

    break;
  }
  case 'd':
    if (Name.consume_front("dbg.")) {
      // Mark debug intrinsics for upgrade to new debug format.
      if (CanUpgradeDebugIntrinsicsToRecords &&
          F->getParent()->IsNewDbgInfoFormat) {
        if (Name == "addr" || Name == "value" || Name == "assign" ||
            Name == "declare" || Name == "label") {
          // There's no function to replace these with.
          NewFn = nullptr;
          // But we do want these to get upgraded.
          return true;
        }
      }
      // Update llvm.dbg.addr intrinsics even in "new debug mode"; they'll get
      // converted to DbgVariableRecords later.
      if (Name == "addr" || (Name == "value" && F->arg_size() == 4)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
        return true;
      }
      break; // No other 'dbg.*'.
    }
    break;
  case 'e':
    if (Name.consume_front("experimental.vector.")) {
      Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
                             .StartsWith("extract.", Intrinsic::vector_extract)
                             .StartsWith("insert.", Intrinsic::vector_insert)
                             .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        const auto *FT = F->getFunctionType();
        SmallVector<Type *, 2> Tys;
        if (ID == Intrinsic::vector_extract)
          // Extracting overloads the return type.
          Tys.push_back(FT->getReturnType());
        Tys.push_back(FT->getParamType(0));
        if (ID == Intrinsic::vector_insert)
          // Inserting overloads the inserted type.
          Tys.push_back(FT->getParamType(1));
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }

      if (Name.consume_front("reduce.")) {
        SmallVector<StringRef, 2> Groups;
        static const Regex R("^([a-z]+)\\.[a-z][0-9]+");
        if (R.match(Name, &Groups))
          ID = StringSwitch<Intrinsic::ID>(Groups[1])
                   .Case("add", Intrinsic::vector_reduce_add)
                   .Case("mul", Intrinsic::vector_reduce_mul)
                   .Case("and", Intrinsic::vector_reduce_and)
                   .Case("or", Intrinsic::vector_reduce_or)
                   .Case("xor", Intrinsic::vector_reduce_xor)
                   .Case("smax", Intrinsic::vector_reduce_smax)
                   .Case("smin", Intrinsic::vector_reduce_smin)
                   .Case("umax", Intrinsic::vector_reduce_umax)
                   .Case("umin", Intrinsic::vector_reduce_umin)
                   .Case("fmax", Intrinsic::vector_reduce_fmax)
                   .Case("fmin", Intrinsic::vector_reduce_fmin)
                   .Default(Intrinsic::not_intrinsic);

        bool V2 = false;
        if (ID == Intrinsic::not_intrinsic) {
          static const Regex R2("^v2\\.([a-z]+)\\.[fi][0-9]+");
          Groups.clear();
          V2 = true;
          if (R2.match(Name, &Groups))
            ID = StringSwitch<Intrinsic::ID>(Groups[1])
                     .Case("fadd", Intrinsic::vector_reduce_fadd)
                     .Case("fmul", Intrinsic::vector_reduce_fmul)
                     .Default(Intrinsic::not_intrinsic);
        }
        if (ID != Intrinsic::not_intrinsic) {
          rename(F);
          auto Args = F->getFunctionType()->params();
          NewFn =
              Intrinsic::getDeclaration(F->getParent(), ID, {Args[V2 ? 1 : 0]});
          return true;
        }
        break; // No other 'experimental.vector.reduce.*'.
      }
      break; // No other 'experimental.vector.*'.
    }
    break; // No other 'e*'.
  case 'f':
    if (Name.starts_with("flt.rounds")) {
      rename(F);
      NewFn =
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::get_rounding);
      return true;
    }
    break;
  case 'i':
    if (Name.starts_with("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(
          F->getParent(), Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }
    break;
  case 'm': {
    // Update the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embed the alignment as an attribute of
    // the pointer args.
    if (unsigned ID = StringSwitch<unsigned>(Name)
                          .StartsWith("memcpy.", Intrinsic::memcpy)
                          .StartsWith("memmove.", Intrinsic::memmove)
                          .Default(0)) {
      if (F->arg_size() == 5) {
        rename(F);
        // Get the types of dest, src, and len
        ArrayRef<Type *> ParamTypes =
            F->getFunctionType()->params().slice(0, 3);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ParamTypes);
        return true;
      }
    }
    if (Name.starts_with("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, and len
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.consume_front("nvvm.")) {
      // Check for nvvm intrinsics corresponding exactly to an LLVM intrinsic.
      if (F->arg_size() == 1) {
        Intrinsic::ID IID =
            StringSwitch<Intrinsic::ID>(Name)
                .Cases("brev32", "brev64", Intrinsic::bitreverse)
                .Case("clz.i", Intrinsic::ctlz)
                .Case("popc.i", Intrinsic::ctpop)
                .Default(Intrinsic::not_intrinsic);
        if (IID != Intrinsic::not_intrinsic) {
          NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                            {F->getReturnType()});
          return true;
        }
      }

      // Check for nvvm intrinsics that need a return type adjustment.
      if (!F->getReturnType()->getScalarType()->isBFloatTy()) {
        Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
        if (IID != Intrinsic::not_intrinsic) {
          NewFn = nullptr;
          return true;
        }
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = false;
      if (Name.consume_front("abs."))
        // nvvm.abs.{i,ll}
        Expand = Name == "i" || Name == "ll";
      else if (Name == "clz.ll" || Name == "popc.ll" || Name == "h2f")
        Expand = true;
      else if (Name.consume_front("max.") || Name.consume_front("min."))
        // nvvm.{min,max}.{s,i,ll,us,ui,ull}
        Expand = Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
                 Name == "ui" || Name == "ull";
      else if (Name.consume_front("atomic.load.add."))
        // nvvm.atomic.load.add.{f32.p,f64.p}
        Expand = Name.starts_with("f32.p") || Name.starts_with("f64.p");
      else
        Expand = false;

      if (Expand) {
        NewFn = nullptr;
        return true;
      }
      break; // No other 'nvvm.*'.
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.starts_with("objectsize.")) {
      Type *Tys[2] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->arg_size() == 2 || F->arg_size() == 3 ||
          F->getName() !=
              Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::objectsize, Tys);
        return true;
      }
    }
    break;

  case 'p':
    if (Name.starts_with("ptr.annotation.") && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(
          F->getParent(), Intrinsic::ptr_annotation,
          {F->arg_begin()->getType(), F->getArg(1)->getType()});
      return true;
    }
    break;

  case 'r': {
    if (Name.consume_front("riscv.")) {
      Intrinsic::ID ID;
      ID = StringSwitch<Intrinsic::ID>(Name)
               .Case("aes32dsi", Intrinsic::riscv_aes32dsi)
               .Case("aes32dsmi", Intrinsic::riscv_aes32dsmi)
               .Case("aes32esi", Intrinsic::riscv_aes32esi)
               .Case("aes32esmi", Intrinsic::riscv_aes32esmi)
               .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other applicable upgrades.
      }

      ID = StringSwitch<Intrinsic::ID>(Name)
               .StartsWith("sm4ks", Intrinsic::riscv_sm4ks)
               .StartsWith("sm4ed", Intrinsic::riscv_sm4ed)
               .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32) ||
            F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other applicable upgrades.
      }

      ID = StringSwitch<Intrinsic::ID>(Name)
               .StartsWith("sha256sig0", Intrinsic::riscv_sha256sig0)
               .StartsWith("sha256sig1", Intrinsic::riscv_sha256sig1)
               .StartsWith("sha256sum0", Intrinsic::riscv_sha256sum0)
               .StartsWith("sha256sum1", Intrinsic::riscv_sha256sum1)
               .StartsWith("sm3p0", Intrinsic::riscv_sm3p0)
               .StartsWith("sm3p1", Intrinsic::riscv_sm3p1)
               .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        if (F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other applicable upgrades.
      }
      break; // No other 'riscv.*' intrinsics
    }
  } break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'v': {
    if (Name == "var.annotation" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(
          F->getParent(), Intrinsic::var_annotation,
          {{F->arg_begin()->getType(), F->getArg(1)->getType()}});
      return true;
    }
    break;
  }

  case 'w':
    if (Name.consume_front("wasm.")) {
      Intrinsic::ID ID =
          StringSwitch<Intrinsic::ID>(Name)
              .StartsWith("fma.", Intrinsic::wasm_relaxed_madd)
              .StartsWith("fms.", Intrinsic::wasm_relaxed_nmadd)
              .StartsWith("laneselect.", Intrinsic::wasm_relaxed_laneselect)
              .Default(Intrinsic::not_intrinsic);
      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        NewFn =
            Intrinsic::getDeclaration(F->getParent(), ID, F->getReturnType());
        return true;
      }

      if (Name.consume_front("dot.i8x16.i7x16.")) {
        ID = StringSwitch<Intrinsic::ID>(Name)
                 .Case("signed", Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed)
                 .Case("add.signed",
                       Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed)
                 .Default(Intrinsic::not_intrinsic);
        if (ID != Intrinsic::not_intrinsic) {
          rename(F);
          NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
          return true;
        }
        break; // No other 'wasm.dot.i8x16.i7x16.*'.
      }
      break; // No other 'wasm.*'.
    }
    break;

  case 'x':
    if (upgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }

  auto *ST = dyn_cast<StructType>(F->getReturnType());
  if (ST && (!ST->isLiteral() || ST->isPacked()) &&
      F->getIntrinsicID() != Intrinsic::not_intrinsic) {
    // Replace return type with literal non-packed struct. Only do this for
    // intrinsics declared to return a struct, not for intrinsics with
    // overloaded return type, in which case the exact struct type will be
    // mangled into the name.
    SmallVector<Intrinsic::IITDescriptor> Desc;
    Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
    if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
      auto *FT = F->getFunctionType();
      auto *NewST = StructType::get(ST->getContext(), ST->elements());
      auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
      std::string Name = F->getName().str();
      rename(F);
      NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
                               Name, F->getParent());

      // The new function may also need remangling.
      if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
        NewFn = *Result;
      return true;
    }
  }

  // Remangle our intrinsic since we upgrade the mangling
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != std::nullopt) {
    NewFn = *Result;
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn,
                                    bool CanUpgradeDebugIntrinsicsToRecords) {
  NewFn = nullptr;
  bool Upgraded =
      upgradeIntrinsicFunction1(F, NewFn, CanUpgradeDebugIntrinsicsToRecords);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
                          GV->getName() == "llvm.global_dtors")) ||
      !GV->hasInitializer())
    return nullptr;
  ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
  if (!ATy)
    return nullptr;
  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
  if (!STy || STy->getNumElements() != 2)
    return nullptr;

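  // A two-field entry such as { i32, ptr } predates the associated-data
  // member; it is rebuilt below as { i32, ptr, ptr } with a null third field.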
  LLVMContext &C = GV->getContext();
  IRBuilder<> IRB(C);
  auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
                               IRB.getPtrTy());
  Constant *Init = GV->getInitializer();
  unsigned N = Init->getNumOperands();
  std::vector<Constant *> NewCtors(N);
  for (unsigned i = 0; i != N; ++i) {
    auto Ctor = cast<Constant>(Init->getOperand(i));
    NewCtors[i] = ConstantStruct::get(EltTy, Ctor->getAggregateElement(0u),
                                      Ctor->getAggregateElement(1),
                                      Constant::getNullValue(IRB.getPtrTy()));
  }
  Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);

  return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
                            NewInit, GV->getName());
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  auto *ResultTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = ResultTy->getNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    int Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }
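    // E.g. for a 128-bit vector with Shift == 4, Idxs is {12..15, 16..27}:
    // the low four lanes read zeroes from Res and the rest read Op's bytes
    // 0..11, i.e. a byte shift left by four within the lane.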

    Res = Builder.CreateShuffleVector(Res, Op, ArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  auto *ResultTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = ResultTy->getNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    int Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }
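    // E.g. for a 128-bit vector with Shift == 4, Idxs is {4..19}: element i
    // takes Op's byte i + 4 while the top four lanes read zeroes from Res,
    // i.e. a byte shift right by four within the lane.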

    Res = Builder.CreateShuffleVector(Op, Res, ArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
  auto *MaskTy = FixedVectorType::get(
      Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements (1, 2 or 4), then the starting mask was an
  // i8 and we need to extract down to the right number of elements.
  if (NumElts <= 4) {
    int Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask, ArrayRef(Indices, NumElts),
                                       "extract");
  }
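  // E.g. a four-element operation still carries its mask in an i8: the
  // bitcast yields <8 x i1> and the shuffle keeps only the low four bits,
  // producing the <4 x i1> the select expects.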

  return Mask;
}

static Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
                            Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask,
                       cast<FixedVectorType>(Op0->getType())->getNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
                                  Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
                                      Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
static Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);
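  // E.g. for an eight-element valignd, the immediate is reduced modulo 8,
  // matching the instruction's modulo semantics for out-of-range immediates.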
1607
1608 // If palignr is shifting the pair of vectors more than the size of two
1609 // lanes, emit zero.
1610 if (ShiftVal >= 32)
1612
1613 // If palignr is shifting the pair of input vectors more than one lane,
1614 // but less than two lanes, convert to shifting in zeroes.
1615 if (ShiftVal > 16) {
1616 ShiftVal -= 16;
1617 Op1 = Op0;
1618 Op0 = llvm::Constant::getNullValue(Op0->getType());
1619 }
1620
1621 int Indices[64];
1622 // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
1623 for (unsigned l = 0; l < NumElts; l += 16) {
1624 for (unsigned i = 0; i != 16; ++i) {
1625 unsigned Idx = ShiftVal + i;
1626 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1627 Idx += NumElts - 16; // End of lane, switch operand.
1628 Indices[l + i] = Idx + l;
1629 }
1630 }
1631
1632 Value *Align = Builder.CreateShuffleVector(
1633 Op1, Op0, ArrayRef(Indices, NumElts), "palignr");
1634
1635 return emitX86Select(Builder, Mask, Align, Passthru);
1636}
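// As a sketch (operands illustrative): palignr by 4 on a pair of <16 x i8>
// vectors becomes one shuffle taking bytes 4..19 of the concatenation Op1:Op0:
//   %palignr = shufflevector <16 x i8> %op1, <16 x i8> %op0,
//              <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10,
//                          i32 11, i32 12, i32 13, i32 14, i32 15, i32 16,
//                          i32 17, i32 18, i32 19>
// followed by the masked select above when a mask is present.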
1637
1638static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
1639 bool ZeroMask, bool IndexForm) {
1640 Type *Ty = CI.getType();
1641 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1642 unsigned EltWidth = Ty->getScalarSizeInBits();
1643 bool IsFloat = Ty->isFPOrFPVectorTy();
1644 Intrinsic::ID IID;
1645 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1646 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1647 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1648 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1649 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1650 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1651 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1652 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1653 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1654 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1655 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1656 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1657 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1658 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1659 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1660 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1661 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1662 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1663 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1664 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1665 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1666 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1667 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1668 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1669 else if (VecWidth == 128 && EltWidth == 16)
1670 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1671 else if (VecWidth == 256 && EltWidth == 16)
1672 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1673 else if (VecWidth == 512 && EltWidth == 16)
1674 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1675 else if (VecWidth == 128 && EltWidth == 8)
1676 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1677 else if (VecWidth == 256 && EltWidth == 8)
1678 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1679 else if (VecWidth == 512 && EltWidth == 8)
1680 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1681 else
1682 llvm_unreachable("Unexpected intrinsic");
1683
1684 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1685 CI.getArgOperand(2) };
1686
1687 // If this isn't index form we need to swap operands 0 and 1.
1688 if (!IndexForm)
1689 std::swap(Args[0], Args[1]);
1690
1691 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1692 Args);
1693 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1694 : Builder.CreateBitCast(CI.getArgOperand(1),
1695 Ty);
1696 return emitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1697}
1698
1699static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
1700 Intrinsic::ID IID) {
1701 Type *Ty = CI.getType();
1702 Value *Op0 = CI.getOperand(0);
1703 Value *Op1 = CI.getOperand(1);
1704 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1705 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1706
1707 if (CI.arg_size() == 4) { // For masked intrinsics.
1708 Value *VecSrc = CI.getOperand(2);
1709 Value *Mask = CI.getOperand(3);
1710 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1711 }
1712 return Res;
1713}
1714
1715static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
1716 bool IsRotateRight) {
1717 Type *Ty = CI.getType();
1718 Value *Src = CI.getArgOperand(0);
1719 Value *Amt = CI.getArgOperand(1);
1720
1721 // The amount may be a scalar immediate, in which case create a splat vector.
1722 // Funnel shift amounts are treated as modulo and the types are all power-of-2 so
1723 // we only care about the lowest log2 bits anyway.
1724 if (Amt->getType() != Ty) {
1725 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1726 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1727 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1728 }
1729
1730 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1731 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1732 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1733
1734 if (CI.arg_size() == 4) { // For masked intrinsics.
1735 Value *VecSrc = CI.getOperand(2);
1736 Value *Mask = CI.getOperand(3);
1737 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1738 }
1739 return Res;
1740}
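// For instance (names illustrative), a rotate-left of <4 x i32> by an
// immediate 5 is upgraded to a funnel shift with both data operands equal:
//   %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x,
//                                        <4 x i32> <i32 5, i32 5, i32 5, i32 5>)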
1741
1742static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
1743 bool IsSigned) {
1744 Type *Ty = CI.getType();
1745 Value *LHS = CI.getArgOperand(0);
1746 Value *RHS = CI.getArgOperand(1);
1747
1748 CmpInst::Predicate Pred;
1749 switch (Imm) {
1750 case 0x0:
1751 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1752 break;
1753 case 0x1:
1754 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1755 break;
1756 case 0x2:
1757 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1758 break;
1759 case 0x3:
1760 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1761 break;
1762 case 0x4:
1763 Pred = ICmpInst::ICMP_EQ;
1764 break;
1765 case 0x5:
1766 Pred = ICmpInst::ICMP_NE;
1767 break;
1768 case 0x6:
1769 return Constant::getNullValue(Ty); // FALSE
1770 case 0x7:
1771 return Constant::getAllOnesValue(Ty); // TRUE
1772 default:
1773 llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
1774 }
1775
1776 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1777 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1778 return Ext;
1779}
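// For instance (names illustrative), the signed less-than byte variant
// (Imm == 0x0) on <16 x i8> becomes a plain compare plus sign-extension:
//   %c = icmp slt <16 x i8> %a, %b
//   %r = sext <16 x i1> %c to <16 x i8>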
1780
1781static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
1782 bool IsShiftRight, bool ZeroMask) {
1783 Type *Ty = CI.getType();
1784 Value *Op0 = CI.getArgOperand(0);
1785 Value *Op1 = CI.getArgOperand(1);
1786 Value *Amt = CI.getArgOperand(2);
1787
1788 if (IsShiftRight)
1789 std::swap(Op0, Op1);
1790
1791 // The amount may be a scalar immediate, in which case create a splat vector.
1792 // Funnel shift amounts are treated as modulo and the types are all power-of-2 so
1793 // we only care about the lowest log2 bits anyway.
1794 if (Amt->getType() != Ty) {
1795 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1796 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1797 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1798 }
1799
1800 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1801 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1802 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1803
1804 unsigned NumArgs = CI.arg_size();
1805 if (NumArgs >= 4) { // For masked intrinsics.
1806 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1807 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1808 CI.getArgOperand(0);
1809 Value *Mask = CI.getOperand(NumArgs - 1);
1810 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1811 }
1812 return Res;
1813}
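// Sketch (names illustrative): a left concat-shift of <4 x i32> by 7 maps to
//   %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                        <4 x i32> <i32 7, i32 7, i32 7, i32 7>)
// with Op0 and Op1 pre-swapped for the right-shift (fshr) variant.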
1814
1815static Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
1816 Value *Mask, bool Aligned) {
1817 // Cast the pointer to the right type.
1818 Ptr = Builder.CreateBitCast(Ptr,
1819 llvm::PointerType::getUnqual(Data->getType()));
1820 const Align Alignment =
1821 Aligned
1822 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedValue() / 8)
1823 : Align(1);
1824
1825 // If the mask is all ones just emit a regular store.
1826 if (const auto *C = dyn_cast<Constant>(Mask))
1827 if (C->isAllOnesValue())
1828 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1829
1830 // Convert the mask from an integer type to a vector of i1.
1831 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1832 Mask = getX86MaskVec(Builder, Mask, NumElts);
1833 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1834}
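// With a non-constant i8 mask over <4 x float>, the aligned path above emits
// roughly (names illustrative):
//   %m = bitcast i8 %mask to <8 x i1>
//   %e = shufflevector <8 x i1> %m, <8 x i1> %m,
//        <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   call void @llvm.masked.store.v4f32.p0(<4 x float> %data, ptr %p, i32 16,
//                                         <4 x i1> %e)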
1835
1836static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
1837 Value *Passthru, Value *Mask, bool Aligned) {
1838 Type *ValTy = Passthru->getType();
1839 // Cast the pointer to the right type.
1840 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1841 const Align Alignment =
1842 Aligned
1843 ? Align(
1844 Passthru->getType()->getPrimitiveSizeInBits().getFixedValue() /
1845 8)
1846 : Align(1);
1847
1848 // If the mask is all ones just emit a regular load.
1849 if (const auto *C = dyn_cast<Constant>(Mask))
1850 if (C->isAllOnesValue())
1851 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1852
1853 // Convert the mask from an integer type to a vector of i1.
1854 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1855 Mask = getX86MaskVec(Builder, Mask, NumElts);
1856 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1857}
1858
1859static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
1860 Type *Ty = CI.getType();
1861 Value *Op0 = CI.getArgOperand(0);
1862 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1863 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1864 if (CI.arg_size() == 3)
1865 Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1866 return Res;
1867}
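// For example (name illustrative), ssse3.pabs.b.128 becomes the generic abs
// intrinsic with the INT_MIN-is-poison flag set to false:
//   %r = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %x, i1 false)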
1868
1869static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
1870 Type *Ty = CI.getType();
1871
1872 // Arguments have a vXi32 type so cast to vXi64.
1873 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1874 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1875
1876 if (IsSigned) {
1877 // Shift left then arithmetic shift right.
1878 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1879 LHS = Builder.CreateShl(LHS, ShiftAmt);
1880 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1881 RHS = Builder.CreateShl(RHS, ShiftAmt);
1882 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1883 } else {
1884 // Clear the upper bits.
1885 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1886 LHS = Builder.CreateAnd(LHS, Mask);
1887 RHS = Builder.CreateAnd(RHS, Mask);
1888 }
1889
1890 Value *Res = Builder.CreateMul(LHS, RHS);
1891
1892 if (CI.arg_size() == 4)
1893 Res = emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1894
1895 return Res;
1896}
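// Sketch of the unsigned 128-bit case (names illustrative): the <4 x i32>
// arguments are reinterpreted as <2 x i64> and the upper 32 bits of each lane
// are cleared before the multiply:
//   %l  = bitcast <4 x i32> %a to <2 x i64>
//   %r  = bitcast <4 x i32> %b to <2 x i64>
//   %lm = and <2 x i64> %l, <i64 4294967295, i64 4294967295>
//   %rm = and <2 x i64> %r, <i64 4294967295, i64 4294967295>
//   %p  = mul <2 x i64> %lm, %rm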
1897
1898// Apply a mask on a vector of i1's and make sure the result is at least 8 bits wide.
1899static Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1900 Value *Mask) {
1901 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1902 if (Mask) {
1903 const auto *C = dyn_cast<Constant>(Mask);
1904 if (!C || !C->isAllOnesValue())
1905 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1906 }
1907
1908 if (NumElts < 8) {
1909 int Indices[8];
1910 for (unsigned i = 0; i != NumElts; ++i)
1911 Indices[i] = i;
1912 for (unsigned i = NumElts; i != 8; ++i)
1913 Indices[i] = NumElts + i % NumElts;
1914 Vec = Builder.CreateShuffleVector(Vec,
1915 Constant::getNullValue(Vec->getType()),
1916 Indices);
1917 }
1918 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1919}
1920
1921static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
1922 unsigned CC, bool Signed) {
1923 Value *Op0 = CI.getArgOperand(0);
1924 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1925
1926 Value *Cmp;
1927 if (CC == 3) {
1928 Cmp = Constant::getNullValue(
1929 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1930 } else if (CC == 7) {
1931 Cmp = Constant::getAllOnesValue(
1932 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1933 } else {
1934 ICmpInst::Predicate Pred;
1935 switch (CC) {
1936 default: llvm_unreachable("Unknown condition code");
1937 case 0: Pred = ICmpInst::ICMP_EQ; break;
1938 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1939 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1940 case 4: Pred = ICmpInst::ICMP_NE; break;
1941 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1942 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1943 }
1944 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1945 }
1946
1947 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1948
1949 return applyX86MaskOn1BitsVec(Builder, Cmp, Mask);
1950}
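// E.g. a masked signed greater-than compare on <4 x i32> (CC == 6) becomes,
// in sketch form (names illustrative):
//   %c = icmp sgt <4 x i32> %a, %b
// then, unless the mask is known all-ones, an 'and' with the <4 x i1> mask
// vector, a widening shuffle up to <8 x i1>, and a bitcast back to i8.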
1951
1952// Replace a masked intrinsic with an older unmasked intrinsic.
1953static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
1954 Intrinsic::ID IID) {
1955 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1956 Value *Rep = Builder.CreateCall(Intrin,
1957 { CI.getArgOperand(0), CI.getArgOperand(1) });
1958 return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1959}
1960
1961static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
1962 Value* A = CI.getArgOperand(0);
1963 Value* B = CI.getArgOperand(1);
1964 Value* Src = CI.getArgOperand(2);
1965 Value* Mask = CI.getArgOperand(3);
1966
1967 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1968 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1969 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1970 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1971 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1972 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1973}
1974
1975static Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
1976 Value* Op = CI.getArgOperand(0);
1977 Type* ReturnOp = CI.getType();
1978 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1979 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1980 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1981}
1982
1983// Replace intrinsic with unmasked version and a select.
1984static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1985 CallBase &CI, Value *&Rep) {
1986 Name = Name.substr(12); // Remove avx512.mask.
1987
1988 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1989 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1990 Intrinsic::ID IID;
1991 if (Name.starts_with("max.p")) {
1992 if (VecWidth == 128 && EltWidth == 32)
1993 IID = Intrinsic::x86_sse_max_ps;
1994 else if (VecWidth == 128 && EltWidth == 64)
1995 IID = Intrinsic::x86_sse2_max_pd;
1996 else if (VecWidth == 256 && EltWidth == 32)
1997 IID = Intrinsic::x86_avx_max_ps_256;
1998 else if (VecWidth == 256 && EltWidth == 64)
1999 IID = Intrinsic::x86_avx_max_pd_256;
2000 else
2001 llvm_unreachable("Unexpected intrinsic");
2002 } else if (Name.starts_with("min.p")) {
2003 if (VecWidth == 128 && EltWidth == 32)
2004 IID = Intrinsic::x86_sse_min_ps;
2005 else if (VecWidth == 128 && EltWidth == 64)
2006 IID = Intrinsic::x86_sse2_min_pd;
2007 else if (VecWidth == 256 && EltWidth == 32)
2008 IID = Intrinsic::x86_avx_min_ps_256;
2009 else if (VecWidth == 256 && EltWidth == 64)
2010 IID = Intrinsic::x86_avx_min_pd_256;
2011 else
2012 llvm_unreachable("Unexpected intrinsic");
2013 } else if (Name.starts_with("pshuf.b.")) {
2014 if (VecWidth == 128)
2015 IID = Intrinsic::x86_ssse3_pshuf_b_128;
2016 else if (VecWidth == 256)
2017 IID = Intrinsic::x86_avx2_pshuf_b;
2018 else if (VecWidth == 512)
2019 IID = Intrinsic::x86_avx512_pshuf_b_512;
2020 else
2021 llvm_unreachable("Unexpected intrinsic");
2022 } else if (Name.starts_with("pmul.hr.sw.")) {
2023 if (VecWidth == 128)
2024 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
2025 else if (VecWidth == 256)
2026 IID = Intrinsic::x86_avx2_pmul_hr_sw;
2027 else if (VecWidth == 512)
2028 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
2029 else
2030 llvm_unreachable("Unexpected intrinsic");
2031 } else if (Name.starts_with("pmulh.w.")) {
2032 if (VecWidth == 128)
2033 IID = Intrinsic::x86_sse2_pmulh_w;
2034 else if (VecWidth == 256)
2035 IID = Intrinsic::x86_avx2_pmulh_w;
2036 else if (VecWidth == 512)
2037 IID = Intrinsic::x86_avx512_pmulh_w_512;
2038 else
2039 llvm_unreachable("Unexpected intrinsic");
2040 } else if (Name.starts_with("pmulhu.w.")) {
2041 if (VecWidth == 128)
2042 IID = Intrinsic::x86_sse2_pmulhu_w;
2043 else if (VecWidth == 256)
2044 IID = Intrinsic::x86_avx2_pmulhu_w;
2045 else if (VecWidth == 512)
2046 IID = Intrinsic::x86_avx512_pmulhu_w_512;
2047 else
2048 llvm_unreachable("Unexpected intrinsic");
2049 } else if (Name.starts_with("pmaddw.d.")) {
2050 if (VecWidth == 128)
2051 IID = Intrinsic::x86_sse2_pmadd_wd;
2052 else if (VecWidth == 256)
2053 IID = Intrinsic::x86_avx2_pmadd_wd;
2054 else if (VecWidth == 512)
2055 IID = Intrinsic::x86_avx512_pmaddw_d_512;
2056 else
2057 llvm_unreachable("Unexpected intrinsic");
2058 } else if (Name.starts_with("pmaddubs.w.")) {
2059 if (VecWidth == 128)
2060 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
2061 else if (VecWidth == 256)
2062 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
2063 else if (VecWidth == 512)
2064 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
2065 else
2066 llvm_unreachable("Unexpected intrinsic");
2067 } else if (Name.starts_with("packsswb.")) {
2068 if (VecWidth == 128)
2069 IID = Intrinsic::x86_sse2_packsswb_128;
2070 else if (VecWidth == 256)
2071 IID = Intrinsic::x86_avx2_packsswb;
2072 else if (VecWidth == 512)
2073 IID = Intrinsic::x86_avx512_packsswb_512;
2074 else
2075 llvm_unreachable("Unexpected intrinsic");
2076 } else if (Name.starts_with("packssdw.")) {
2077 if (VecWidth == 128)
2078 IID = Intrinsic::x86_sse2_packssdw_128;
2079 else if (VecWidth == 256)
2080 IID = Intrinsic::x86_avx2_packssdw;
2081 else if (VecWidth == 512)
2082 IID = Intrinsic::x86_avx512_packssdw_512;
2083 else
2084 llvm_unreachable("Unexpected intrinsic");
2085 } else if (Name.starts_with("packuswb.")) {
2086 if (VecWidth == 128)
2087 IID = Intrinsic::x86_sse2_packuswb_128;
2088 else if (VecWidth == 256)
2089 IID = Intrinsic::x86_avx2_packuswb;
2090 else if (VecWidth == 512)
2091 IID = Intrinsic::x86_avx512_packuswb_512;
2092 else
2093 llvm_unreachable("Unexpected intrinsic");
2094 } else if (Name.starts_with("packusdw.")) {
2095 if (VecWidth == 128)
2096 IID = Intrinsic::x86_sse41_packusdw;
2097 else if (VecWidth == 256)
2098 IID = Intrinsic::x86_avx2_packusdw;
2099 else if (VecWidth == 512)
2100 IID = Intrinsic::x86_avx512_packusdw_512;
2101 else
2102 llvm_unreachable("Unexpected intrinsic");
2103 } else if (Name.starts_with("vpermilvar.")) {
2104 if (VecWidth == 128 && EltWidth == 32)
2105 IID = Intrinsic::x86_avx_vpermilvar_ps;
2106 else if (VecWidth == 128 && EltWidth == 64)
2107 IID = Intrinsic::x86_avx_vpermilvar_pd;
2108 else if (VecWidth == 256 && EltWidth == 32)
2109 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
2110 else if (VecWidth == 256 && EltWidth == 64)
2111 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
2112 else if (VecWidth == 512 && EltWidth == 32)
2113 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
2114 else if (VecWidth == 512 && EltWidth == 64)
2115 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
2116 else
2117 llvm_unreachable("Unexpected intrinsic");
2118 } else if (Name == "cvtpd2dq.256") {
2119 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
2120 } else if (Name == "cvtpd2ps.256") {
2121 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
2122 } else if (Name == "cvttpd2dq.256") {
2123 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
2124 } else if (Name == "cvttps2dq.128") {
2125 IID = Intrinsic::x86_sse2_cvttps2dq;
2126 } else if (Name == "cvttps2dq.256") {
2127 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
2128 } else if (Name.starts_with("permvar.")) {
2129 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
2130 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
2131 IID = Intrinsic::x86_avx2_permps;
2132 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
2133 IID = Intrinsic::x86_avx2_permd;
2134 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
2135 IID = Intrinsic::x86_avx512_permvar_df_256;
2136 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
2137 IID = Intrinsic::x86_avx512_permvar_di_256;
2138 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
2139 IID = Intrinsic::x86_avx512_permvar_sf_512;
2140 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
2141 IID = Intrinsic::x86_avx512_permvar_si_512;
2142 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
2143 IID = Intrinsic::x86_avx512_permvar_df_512;
2144 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
2145 IID = Intrinsic::x86_avx512_permvar_di_512;
2146 else if (VecWidth == 128 && EltWidth == 16)
2147 IID = Intrinsic::x86_avx512_permvar_hi_128;
2148 else if (VecWidth == 256 && EltWidth == 16)
2149 IID = Intrinsic::x86_avx512_permvar_hi_256;
2150 else if (VecWidth == 512 && EltWidth == 16)
2151 IID = Intrinsic::x86_avx512_permvar_hi_512;
2152 else if (VecWidth == 128 && EltWidth == 8)
2153 IID = Intrinsic::x86_avx512_permvar_qi_128;
2154 else if (VecWidth == 256 && EltWidth == 8)
2155 IID = Intrinsic::x86_avx512_permvar_qi_256;
2156 else if (VecWidth == 512 && EltWidth == 8)
2157 IID = Intrinsic::x86_avx512_permvar_qi_512;
2158 else
2159 llvm_unreachable("Unexpected intrinsic");
2160 } else if (Name.starts_with("dbpsadbw.")) {
2161 if (VecWidth == 128)
2162 IID = Intrinsic::x86_avx512_dbpsadbw_128;
2163 else if (VecWidth == 256)
2164 IID = Intrinsic::x86_avx512_dbpsadbw_256;
2165 else if (VecWidth == 512)
2166 IID = Intrinsic::x86_avx512_dbpsadbw_512;
2167 else
2168 llvm_unreachable("Unexpected intrinsic");
2169 } else if (Name.starts_with("pmultishift.qb.")) {
2170 if (VecWidth == 128)
2171 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
2172 else if (VecWidth == 256)
2173 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
2174 else if (VecWidth == 512)
2175 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
2176 else
2177 llvm_unreachable("Unexpected intrinsic");
2178 } else if (Name.starts_with("conflict.")) {
2179 if (Name[9] == 'd' && VecWidth == 128)
2180 IID = Intrinsic::x86_avx512_conflict_d_128;
2181 else if (Name[9] == 'd' && VecWidth == 256)
2182 IID = Intrinsic::x86_avx512_conflict_d_256;
2183 else if (Name[9] == 'd' && VecWidth == 512)
2184 IID = Intrinsic::x86_avx512_conflict_d_512;
2185 else if (Name[9] == 'q' && VecWidth == 128)
2186 IID = Intrinsic::x86_avx512_conflict_q_128;
2187 else if (Name[9] == 'q' && VecWidth == 256)
2188 IID = Intrinsic::x86_avx512_conflict_q_256;
2189 else if (Name[9] == 'q' && VecWidth == 512)
2190 IID = Intrinsic::x86_avx512_conflict_q_512;
2191 else
2192 llvm_unreachable("Unexpected intrinsic");
2193 } else if (Name.starts_with("pavg.")) {
2194 if (Name[5] == 'b' && VecWidth == 128)
2195 IID = Intrinsic::x86_sse2_pavg_b;
2196 else if (Name[5] == 'b' && VecWidth == 256)
2197 IID = Intrinsic::x86_avx2_pavg_b;
2198 else if (Name[5] == 'b' && VecWidth == 512)
2199 IID = Intrinsic::x86_avx512_pavg_b_512;
2200 else if (Name[5] == 'w' && VecWidth == 128)
2201 IID = Intrinsic::x86_sse2_pavg_w;
2202 else if (Name[5] == 'w' && VecWidth == 256)
2203 IID = Intrinsic::x86_avx2_pavg_w;
2204 else if (Name[5] == 'w' && VecWidth == 512)
2205 IID = Intrinsic::x86_avx512_pavg_w_512;
2206 else
2207 llvm_unreachable("Unexpected intrinsic");
2208 } else
2209 return false;
2210
2211 SmallVector<Value *, 4> Args(CI.args());
2212 Args.pop_back();
2213 Args.pop_back();
2214 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
2215 Args);
2216 unsigned NumArgs = CI.arg_size();
2217 Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
2218 CI.getArgOperand(NumArgs - 2));
2219 return true;
2220}
2221
2222/// Upgrade the comment in a call to inline asm that represents an ObjC
2223/// retain/release marker.
2224void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
2225 size_t Pos;
2226 if (AsmStr->find("mov\tfp") == 0 &&
2227 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
2228 (Pos = AsmStr->find("# marker")) != std::string::npos) {
2229 AsmStr->replace(Pos, 1, ";");
2230 }
2231}
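// Sketch (hypothetical asm string): an old-style marker such as
//   "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue"
// is rewritten in place to
//   "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"
// since only the single '#' character at Pos is replaced with ';'.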
2232
2233static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
2234 IRBuilder<> &Builder) {
2235 if (Name == "mve.vctp64.old") {
2236 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
2237 // correct type.
2238 Value *VCTP = Builder.CreateCall(
2239 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
2240 CI->getArgOperand(0), CI->getName());
2241 Value *C1 = Builder.CreateCall(
2242 Intrinsic::getDeclaration(
2243 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2244 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
2245 VCTP);
2246 return Builder.CreateCall(
2247 Intrinsic::getDeclaration(
2248 F->getParent(), Intrinsic::arm_mve_pred_i2v,
2249 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2250 C1);
2251 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
2252 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
2253 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
2254 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
2255 Name ==
2256 "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
2257 Name == "mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v4i1" ||
2258 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
2259 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
2260 Name ==
2261 "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
2262 Name == "mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v4i1" ||
2263 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
2264 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
2265 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
2266 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
2267 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
2268 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
2269 std::vector<Type *> Tys;
2270 unsigned ID = CI->getIntrinsicID();
2271 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
2272 switch (ID) {
2273 case Intrinsic::arm_mve_mull_int_predicated:
2274 case Intrinsic::arm_mve_vqdmull_predicated:
2275 case Intrinsic::arm_mve_vldr_gather_base_predicated:
2276 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
2277 break;
2278 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
2279 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
2280 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
2281 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
2282 V2I1Ty};
2283 break;
2284 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
2285 Tys = {CI->getType(), CI->getOperand(0)->getType(),
2286 CI->getOperand(1)->getType(), V2I1Ty};
2287 break;
2288 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
2289 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
2290 CI->getOperand(2)->getType(), V2I1Ty};
2291 break;
2292 case Intrinsic::arm_cde_vcx1q_predicated:
2293 case Intrinsic::arm_cde_vcx1qa_predicated:
2294 case Intrinsic::arm_cde_vcx2q_predicated:
2295 case Intrinsic::arm_cde_vcx2qa_predicated:
2296 case Intrinsic::arm_cde_vcx3q_predicated:
2297 case Intrinsic::arm_cde_vcx3qa_predicated:
2298 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
2299 break;
2300 default:
2301 llvm_unreachable("Unhandled Intrinsic!");
2302 }
2303
2304 std::vector<Value *> Ops;
2305 for (Value *Op : CI->args()) {
2306 Type *Ty = Op->getType();
2307 if (Ty->getScalarSizeInBits() == 1) {
2308 Value *C1 = Builder.CreateCall(
2309 Intrinsic::getDeclaration(
2310 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2311 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2312 Op);
2313 Op = Builder.CreateCall(
2314 Intrinsic::getDeclaration(F->getParent(),
2315 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
2316 C1);
2317 }
2318 Ops.push_back(Op);
2319 }
2320
2321 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
2322 return Builder.CreateCall(Fn, Ops, CI->getName());
2323 }
2324 llvm_unreachable("Unknown function for ARM CallBase upgrade.");
2325}
2326
2327static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
2328 Function *F, IRBuilder<> &Builder) {
2329 const bool IsInc = Name.starts_with("atomic.inc.");
2330 if (IsInc || Name.starts_with("atomic.dec.")) {
2331 if (CI->getNumOperands() != 6) // Malformed bitcode.
2332 return nullptr;
2333
2334 AtomicRMWInst::BinOp RMWOp =
2335 IsInc ? AtomicRMWInst::UIncWrap : AtomicRMWInst::UDecWrap;
2336
2337 Value *Ptr = CI->getArgOperand(0);
2338 Value *Val = CI->getArgOperand(1);
2339 ConstantInt *OrderArg = dyn_cast<ConstantInt>(CI->getArgOperand(2));
2340 ConstantInt *VolatileArg = dyn_cast<ConstantInt>(CI->getArgOperand(4));
2341
2342 AtomicOrdering Order = AtomicOrdering::SequentiallyConsistent;
2343 if (OrderArg && isValidAtomicOrdering(OrderArg->getZExtValue()))
2344 Order = static_cast<AtomicOrdering>(OrderArg->getZExtValue());
2345 if (Order == AtomicOrdering::NotAtomic ||
2346 Order == AtomicOrdering::Unordered)
2347 Order = AtomicOrdering::SequentiallyConsistent;
2348
2349 // The scope argument never really worked correctly. Use agent as the most
2350 // conservative option which should still always produce the instruction.
2351 SyncScope::ID SSID = F->getContext().getOrInsertSyncScopeID("agent");
2352 AtomicRMWInst *RMW =
2353 Builder.CreateAtomicRMW(RMWOp, Ptr, Val, std::nullopt, Order, SSID);
2354
2355 if (!VolatileArg || !VolatileArg->isZero())
2356 RMW->setVolatile(true);
2357 return RMW;
2358 }
2359
2360 llvm_unreachable("Unknown function for AMDGPU intrinsic upgrade.");
2361}
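// Sketch (operands illustrative; the exact type-mangling suffix varies):
//   %v = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %p, i32 %x, i32 0,
//                                                i32 0, i1 false)
// is rewritten to the generic atomicrmw form
//   %v = atomicrmw uinc_wrap ptr %p, i32 %x syncscope("agent") seq_cst
// with the volatile flag added when the old volatile operand was nonzero.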
2362
2363/// Helper to unwrap intrinsic call MetadataAsValue operands.
2364template <typename MDType>
2365static MDType *unwrapMAVOp(CallBase *CI, unsigned Op) {
2366 if (MetadataAsValue *MAV = dyn_cast<MetadataAsValue>(CI->getArgOperand(Op)))
2367 return dyn_cast<MDType>(MAV->getMetadata());
2368 return nullptr;
2369}
2370
2371/// Convert debug intrinsic calls to non-instruction debug records.
2372/// \p Name - Final part of the intrinsic name, e.g. 'value' in llvm.dbg.value.
2373/// \p CI - The debug intrinsic call.
2374static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
2375 DbgRecord *DR = nullptr;
2376 if (Name == "label") {
2377 DR = new DbgLabelRecord(unwrapMAVOp<DILabel>(CI, 0), CI->getDebugLoc());
2378 } else if (Name == "assign") {
2379 DR = new DbgVariableRecord(
2380 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, 1),
2381 unwrapMAVOp<DIExpression>(CI, 2), unwrapMAVOp<DIAssignID>(CI, 3),
2382 unwrapMAVOp<Metadata>(CI, 4), unwrapMAVOp<DIExpression>(CI, 5),
2383 CI->getDebugLoc());
2384 } else if (Name == "declare") {
2385 DR = new DbgVariableRecord(
2386 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, 1),
2387 unwrapMAVOp<DIExpression>(CI, 2), CI->getDebugLoc(),
2388 DbgVariableRecord::LocationType::Declare);
2389 } else if (Name == "addr") {
2390 // Upgrade dbg.addr to dbg.value with DW_OP_deref.
2391 DIExpression *Expr = unwrapMAVOp<DIExpression>(CI, 2);
2392 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2393 DR = new DbgVariableRecord(unwrapMAVOp<Metadata>(CI, 0),
2394 unwrapMAVOp<DILocalVariable>(CI, 1), Expr,
2395 CI->getDebugLoc());
2396 } else if (Name == "value") {
2397 // An old version of dbg.value had an extra offset argument.
2398 unsigned VarOp = 1;
2399 unsigned ExprOp = 2;
2400 if (CI->arg_size() == 4) {
2401 auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1));
2402 // Nonzero offset dbg.values get dropped without a replacement.
2403 if (!Offset || !Offset->isZeroValue())
2404 return;
2405 VarOp = 2;
2406 ExprOp = 3;
2407 }
2408 DR = new DbgVariableRecord(
2409 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, VarOp),
2410 unwrapMAVOp<DIExpression>(CI, ExprOp), CI->getDebugLoc());
2411 }
2412 assert(DR && "Unhandled intrinsic kind in upgrade to DbgRecord");
2413 CI->getParent()->insertDbgRecordBefore(DR, CI->getIterator());
2414}
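// For example (metadata IDs illustrative), the intrinsic form
//   call void @llvm.dbg.value(metadata i32 %x, metadata !10,
//                             metadata !DIExpression())
// becomes the equivalent non-instruction record
//   #dbg_value(i32 %x, !10, !DIExpression(), !12)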
2415
2416/// Upgrade a call to an old intrinsic. All argument and return casting must be
2417/// provided to seamlessly integrate with existing context.
2418void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
2419 // Note dyn_cast to Function is not quite the same as getCalledFunction, which
2420 // checks that the callee's function type matches. It's likely we need to handle
2421 // type changes here.
2422 Function *F = dyn_cast<Function>(CI->getCalledOperand());
2423 if (!F)
2424 return;
2425
2426 LLVMContext &C = CI->getContext();
2427 IRBuilder<> Builder(C);
2428 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
2429
2430 if (!NewFn) {
2431 bool FallthroughToDefaultUpgrade = false;
2432 // Get the Function's name.
2433 StringRef Name = F->getName();
2434
2435 assert(Name.starts_with("llvm.") && "Intrinsic doesn't start with 'llvm.'");
2436 Name = Name.substr(5);
2437
2438 bool IsX86 = Name.consume_front("x86.");
2439 bool IsNVVM = Name.consume_front("nvvm.");
2440 bool IsARM = Name.consume_front("arm.");
2441 bool IsAMDGCN = Name.consume_front("amdgcn.");
2442 bool IsDbg = Name.consume_front("dbg.");
2443
2444 if (IsX86 && Name.starts_with("sse4a.movnt.")) {
2445 SmallVector<Metadata *, 1> Elts;
2446 Elts.push_back(
2447 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2448 MDNode *Node = MDNode::get(C, Elts);
2449
2450 Value *Arg0 = CI->getArgOperand(0);
2451 Value *Arg1 = CI->getArgOperand(1);
2452
2453 // Nontemporal (unaligned) store of the 0'th element of the float/double
2454 // vector.
2455 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
2456 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
2457 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
2458 Value *Extract =
2459 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
2460
2461 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
2462 SI->setMetadata(LLVMContext::MD_nontemporal, Node);
2463
2464 // Remove intrinsic.
2465 CI->eraseFromParent();
2466 return;
2467 }
2468
2469 if (IsX86 && (Name.starts_with("avx.movnt.") ||
2470 Name.starts_with("avx512.storent."))) {
2471 SmallVector<Metadata *, 1> Elts;
2472 Elts.push_back(
2473 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2474 MDNode *Node = MDNode::get(C, Elts);
2475
2476 Value *Arg0 = CI->getArgOperand(0);
2477 Value *Arg1 = CI->getArgOperand(1);
2478
2479 // Convert the type of the pointer to a pointer to the stored type.
2480 Value *BC = Builder.CreateBitCast(Arg0,
2481 PointerType::getUnqual(Arg1->getType()),
2482 "cast");
2483 StoreInst *SI = Builder.CreateAlignedStore(
2484 Arg1, BC,
2485 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
2486 SI->setMetadata(LLVMContext::MD_nontemporal, Node);
2487
2488 // Remove intrinsic.
2489 CI->eraseFromParent();
2490 return;
2491 }
2492
2493 if (IsX86 && Name == "sse2.storel.dq") {
2494 Value *Arg0 = CI->getArgOperand(0);
2495 Value *Arg1 = CI->getArgOperand(1);
2496
2497 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2498 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2499 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2500 Value *BC = Builder.CreateBitCast(Arg0,
2501 PointerType::getUnqual(Elt->getType()),
2502 "cast");
2503 Builder.CreateAlignedStore(Elt, BC, Align(1));
2504
2505 // Remove intrinsic.
2506 CI->eraseFromParent();
2507 return;
2508 }
2509
2510 if (IsX86 && (Name.starts_with("sse.storeu.") ||
2511 Name.starts_with("sse2.storeu.") ||
2512 Name.starts_with("avx.storeu."))) {
2513 Value *Arg0 = CI->getArgOperand(0);
2514 Value *Arg1 = CI->getArgOperand(1);
2515
2516 Arg0 = Builder.CreateBitCast(Arg0,
2517 PointerType::getUnqual(Arg1->getType()),
2518 "cast");
2519 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2520
2521 // Remove intrinsic.
2522 CI->eraseFromParent();
2523 return;
2524 }
2525
2526 if (IsX86 && Name == "avx512.mask.store.ss") {
2527 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2528 upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2529 Mask, false);
2530
2531 // Remove intrinsic.
2532 CI->eraseFromParent();
2533 return;
2534 }
2535
2536 if (IsX86 && Name.starts_with("avx512.mask.store")) {
2537 // "avx512.mask.storeu." or "avx512.mask.store."
2538 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2539 upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2540 CI->getArgOperand(2), Aligned);
2541
2542 // Remove intrinsic.
2543 CI->eraseFromParent();
2544 return;
2545 }
2546
2547 Value *Rep = nullptr;
2548 // Upgrade packed integer vector compare intrinsics to compare instructions.
2549 if (IsX86 && (Name.starts_with("sse2.pcmp") ||
2550 Name.starts_with("avx2.pcmp"))) {
2551 // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
2552 bool CmpEq = Name[9] == 'e';
2553 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2554 CI->getArgOperand(0), CI->getArgOperand(1));
2555 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2556 } else if (IsX86 && (Name.starts_with("avx512.broadcastm"))) {
2557 Type *ExtTy = Type::getInt32Ty(C);
2558 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2559 ExtTy = Type::getInt64Ty(C);
2560 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2561 ExtTy->getPrimitiveSizeInBits();
2562 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2563 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2564 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2565 Name == "sse2.sqrt.sd")) {
2566 Value *Vec = CI->getArgOperand(0);
2567 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2568 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2569 Intrinsic::sqrt, Elt0->getType());
2570 Elt0 = Builder.CreateCall(Intr, Elt0);
2571 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2572 } else if (IsX86 && (Name.starts_with("avx.sqrt.p") ||
2573 Name.starts_with("sse2.sqrt.p") ||
2574 Name.starts_with("sse.sqrt.p"))) {
2575 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2576 Intrinsic::sqrt,
2577 CI->getType()),
2578 {CI->getArgOperand(0)});
2579 } else if (IsX86 && (Name.starts_with("avx512.mask.sqrt.p"))) {
2580 if (CI->arg_size() == 4 &&
2581 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2582 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2583 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2584 : Intrinsic::x86_avx512_sqrt_pd_512;
2585
2586 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2587 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2588 IID), Args);
2589 } else {
2590 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2591 Intrinsic::sqrt,
2592 CI->getType()),
2593 {CI->getArgOperand(0)});
2594 }
2595 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2596 CI->getArgOperand(1));
2597 } else if (IsX86 && (Name.starts_with("avx512.ptestm") ||
2598 Name.starts_with("avx512.ptestnm"))) {
2599 Value *Op0 = CI->getArgOperand(0);
2600 Value *Op1 = CI->getArgOperand(1);
2601 Value *Mask = CI->getArgOperand(2);
2602 Rep = Builder.CreateAnd(Op0, Op1);
2603 llvm::Type *Ty = Op0->getType();
2604 Value *Zero = llvm::Constant::getNullValue(Ty);
2605 ICmpInst::Predicate Pred =
2606 Name.starts_with("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2607 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2608 Rep = applyX86MaskOn1BitsVec(Builder, Rep, Mask);
2609 } else if (IsX86 && (Name.starts_with("avx512.mask.pbroadcast"))){
2610 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2611 ->getNumElements();
2612 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2613 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2614 CI->getArgOperand(1));
2615 } else if (IsX86 && (Name.starts_with("avx512.kunpck"))) {
2616 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2617 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2618 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2619 int Indices[64];
2620 for (unsigned i = 0; i != NumElts; ++i)
2621 Indices[i] = i;
2622
2623 // First extract half of each vector. This gives better codegen than
2624 // doing it in a single shuffle.
2625 LHS =
2626 Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
2627 RHS =
2628 Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
2629 // Concat the vectors.
2630 // NOTE: Operands have to be swapped to match intrinsic definition.
2631 Rep = Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
2632 Rep = Builder.CreateBitCast(Rep, CI->getType());
2633 } else if (IsX86 && Name == "avx512.kand.w") {
2634 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2635 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2636 Rep = Builder.CreateAnd(LHS, RHS);
2637 Rep = Builder.CreateBitCast(Rep, CI->getType());
2638 } else if (IsX86 && Name == "avx512.kandn.w") {
2639 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2640 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2641 LHS = Builder.CreateNot(LHS);
2642 Rep = Builder.CreateAnd(LHS, RHS);
2643 Rep = Builder.CreateBitCast(Rep, CI->getType());
2644 } else if (IsX86 && Name == "avx512.kor.w") {
2645 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2646 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2647 Rep = Builder.CreateOr(LHS, RHS);
2648 Rep = Builder.CreateBitCast(Rep, CI->getType());
2649 } else if (IsX86 && Name == "avx512.kxor.w") {
2650 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2651 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2652 Rep = Builder.CreateXor(LHS, RHS);
2653 Rep = Builder.CreateBitCast(Rep, CI->getType());
2654 } else if (IsX86 && Name == "avx512.kxnor.w") {
2655 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2656 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2657 LHS = Builder.CreateNot(LHS);
2658 Rep = Builder.CreateXor(LHS, RHS);
2659 Rep = Builder.CreateBitCast(Rep, CI->getType());
2660 } else if (IsX86 && Name == "avx512.knot.w") {
2661 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2662 Rep = Builder.CreateNot(Rep);
2663 Rep = Builder.CreateBitCast(Rep, CI->getType());
2664 } else if (IsX86 &&
2665 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2666 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2667 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2668 Rep = Builder.CreateOr(LHS, RHS);
2669 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2670 Value *C;
2671 if (Name[14] == 'c')
2672 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2673 else
2674 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2675 Rep = Builder.CreateICmpEQ(Rep, C);
2676 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2677 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2678 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2679 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2680 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2681 Type *I32Ty = Type::getInt32Ty(C);
2682 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2683 ConstantInt::get(I32Ty, 0));
2684 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2685 ConstantInt::get(I32Ty, 0));
2686 Value *EltOp;
2687 if (Name.contains(".add."))
2688 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2689 else if (Name.contains(".sub."))
2690 EltOp = Builder.CreateFSub(Elt0, Elt1);
2691 else if (Name.contains(".mul."))
2692 EltOp = Builder.CreateFMul(Elt0, Elt1);
2693 else
2694 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2695 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2696 ConstantInt::get(I32Ty, 0));
2697 } else if (IsX86 && Name.starts_with("avx512.mask.pcmp")) {
2698 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2699 bool CmpEq = Name[16] == 'e';
2700 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2701 } else if (IsX86 && Name.starts_with("avx512.mask.vpshufbitqmb.")) {
2702 Type *OpTy = CI->getArgOperand(0)->getType();
2703 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2704 Intrinsic::ID IID;
2705 switch (VecWidth) {
2706 default: llvm_unreachable("Unexpected intrinsic");
2707 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2708 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2709 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2710 }
2711
2712 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2713 { CI->getOperand(0), CI->getArgOperand(1) });
2714 Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2715 } else if (IsX86 && Name.starts_with("avx512.mask.fpclass.p")) {
2716 Type *OpTy = CI->getArgOperand(0)->getType();
2717 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2718 unsigned EltWidth = OpTy->getScalarSizeInBits();
2719 Intrinsic::ID IID;
2720 if (VecWidth == 128 && EltWidth == 32)
2721 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2722 else if (VecWidth == 256 && EltWidth == 32)
2723 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2724 else if (VecWidth == 512 && EltWidth == 32)
2725 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2726 else if (VecWidth == 128 && EltWidth == 64)
2727 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2728 else if (VecWidth == 256 && EltWidth == 64)
2729 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2730 else if (VecWidth == 512 && EltWidth == 64)
2731 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2732 else
2733 llvm_unreachable("Unexpected intrinsic");
2734
2735 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2736 { CI->getOperand(0), CI->getArgOperand(1) });
2737 Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2738 } else if (IsX86 && Name.starts_with("avx512.cmp.p")) {
2739 SmallVector<Value *, 4> Args(CI->args());
2740 Type *OpTy = Args[0]->getType();
2741 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2742 unsigned EltWidth = OpTy->getScalarSizeInBits();
2743 Intrinsic::ID IID;
2744 if (VecWidth == 128 && EltWidth == 32)
2745 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2746 else if (VecWidth == 256 && EltWidth == 32)
2747 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2748 else if (VecWidth == 512 && EltWidth == 32)
2749 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2750 else if (VecWidth == 128 && EltWidth == 64)
2751 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2752 else if (VecWidth == 256 && EltWidth == 64)
2753 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2754 else if (VecWidth == 512 && EltWidth == 64)
2755 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2756 else
2757 llvm_unreachable("Unexpected intrinsic");
2758
2759 Value *Mask = Constant::getAllOnesValue(Builder.getIntNTy(cast<FixedVectorType>(OpTy)->getNumElements()));
2760 if (VecWidth == 512)
2761 std::swap(Mask, Args.back());
2762 Args.push_back(Mask);
2763
2764 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2765 Args);
2766 } else if (IsX86 && Name.starts_with("avx512.mask.cmp.")) {
2767 // Integer compare intrinsics.
2768 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2769 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2770 } else if (IsX86 && Name.starts_with("avx512.mask.ucmp.")) {
2771 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2772 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2773 } else if (IsX86 && (Name.starts_with("avx512.cvtb2mask.") ||
2774 Name.starts_with("avx512.cvtw2mask.") ||
2775 Name.starts_with("avx512.cvtd2mask.") ||
2776 Name.starts_with("avx512.cvtq2mask."))) {
2777 Value *Op = CI->getArgOperand(0);
2778 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2779 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2780 Rep = applyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2781 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2782 Name == "ssse3.pabs.w.128" ||
2783 Name == "ssse3.pabs.d.128" ||
2784 Name.starts_with("avx2.pabs") ||
2785 Name.starts_with("avx512.mask.pabs"))) {
2786 Rep = upgradeAbs(Builder, *CI);
2787 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2788 Name == "sse2.pmaxs.w" ||
2789 Name == "sse41.pmaxsd" ||
2790 Name.starts_with("avx2.pmaxs") ||
2791 Name.starts_with("avx512.mask.pmaxs"))) {
2792 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2793 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2794 Name == "sse41.pmaxuw" ||
2795 Name == "sse41.pmaxud" ||
2796 Name.starts_with("avx2.pmaxu") ||
2797 Name.starts_with("avx512.mask.pmaxu"))) {
2798 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2799 } else if (IsX86 && (Name == "sse41.pminsb" ||
2800 Name == "sse2.pmins.w" ||
2801 Name == "sse41.pminsd" ||
2802 Name.starts_with("avx2.pmins") ||
2803 Name.starts_with("avx512.mask.pmins"))) {
2804 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2805 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2806 Name == "sse41.pminuw" ||
2807 Name == "sse41.pminud" ||
2808 Name.starts_with("avx2.pminu") ||
2809 Name.starts_with("avx512.mask.pminu"))) {
2810 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2811 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2812 Name == "avx2.pmulu.dq" ||
2813 Name == "avx512.pmulu.dq.512" ||
2814 Name.starts_with("avx512.mask.pmulu.dq."))) {
2815 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2816 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2817 Name == "avx2.pmul.dq" ||
2818 Name == "avx512.pmul.dq.512" ||
2819 Name.starts_with("avx512.mask.pmul.dq."))) {
2820 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2821 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2822 Name == "sse2.cvtsi2sd" ||
2823 Name == "sse.cvtsi642ss" ||
2824 Name == "sse2.cvtsi642sd")) {
2825 Rep = Builder.CreateSIToFP(
2826 CI->getArgOperand(1),
2827 cast<VectorType>(CI->getType())->getElementType());
2828 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2829 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2830 Rep = Builder.CreateUIToFP(
2831 CI->getArgOperand(1),
2832 cast<VectorType>(CI->getType())->getElementType());
2833 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2834 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2835 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2836 Rep = Builder.CreateFPExt(
2837 Rep, cast<VectorType>(CI->getType())->getElementType());
2838 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2839 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2840 Name == "sse2.cvtdq2ps" ||
2841 Name == "avx.cvtdq2.pd.256" ||
2842 Name == "avx.cvtdq2.ps.256" ||
2843 Name.starts_with("avx512.mask.cvtdq2pd.") ||
2844 Name.starts_with("avx512.mask.cvtudq2pd.") ||
2845 Name.starts_with("avx512.mask.cvtdq2ps.") ||
2846 Name.starts_with("avx512.mask.cvtudq2ps.") ||
2847 Name.starts_with("avx512.mask.cvtqq2pd.") ||
2848 Name.starts_with("avx512.mask.cvtuqq2pd.") ||
2849 Name == "avx512.mask.cvtqq2ps.256" ||
2850 Name == "avx512.mask.cvtqq2ps.512" ||
2851 Name == "avx512.mask.cvtuqq2ps.256" ||
2852 Name == "avx512.mask.cvtuqq2ps.512" ||
2853 Name == "sse2.cvtps2pd" ||
2854 Name == "avx.cvt.ps2.pd.256" ||
2855 Name == "avx512.mask.cvtps2pd.128" ||
2856 Name == "avx512.mask.cvtps2pd.256")) {
2857 auto *DstTy = cast<FixedVectorType>(CI->getType());
2858 Rep = CI->getArgOperand(0);
2859 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2860
2861 unsigned NumDstElts = DstTy->getNumElements();
2862 if (NumDstElts < SrcTy->getNumElements()) {
2863 assert(NumDstElts == 2 && "Unexpected vector size");
2864 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2865 }
2866
2867 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2868 bool IsUnsigned = Name.contains("cvtu");
2869 if (IsPS2PD)
2870 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2871 else if (CI->arg_size() == 4 &&
2872 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2873 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2874 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2875 : Intrinsic::x86_avx512_sitofp_round;
2876 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2877 { DstTy, SrcTy });
2878 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2879 } else {
2880 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2881 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2882 }
2883
2884 if (CI->arg_size() >= 3)
2885 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2886 CI->getArgOperand(1));
2887 } else if (IsX86 && (Name.starts_with("avx512.mask.vcvtph2ps.") ||
2888 Name.starts_with("vcvtph2ps."))) {
2889 auto *DstTy = cast<FixedVectorType>(CI->getType());
2890 Rep = CI->getArgOperand(0);
2891 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2892 unsigned NumDstElts = DstTy->getNumElements();
2893 if (NumDstElts != SrcTy->getNumElements()) {
2894 assert(NumDstElts == 4 && "Unexpected vector size");
2895 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2896 }
2897 Rep = Builder.CreateBitCast(
2898 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2899 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2900 if (CI->arg_size() >= 3)
2901 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2902 CI->getArgOperand(1));
2903 } else if (IsX86 && Name.starts_with("avx512.mask.load")) {
2904 // "avx512.mask.loadu." or "avx512.mask.load."
2905 bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
2906 Rep =
2907 upgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2908 CI->getArgOperand(2), Aligned);
2909 } else if (IsX86 && Name.starts_with("avx512.mask.expand.load.")) {
2910 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2911 Type *PtrTy = ResultTy->getElementType();
2912
2913 // Cast the pointer to element type.
2914 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2915 llvm::PointerType::getUnqual(PtrTy));
2916
2917 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2918 ResultTy->getNumElements());
2919
2920 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2921 Intrinsic::masked_expandload,
2922 ResultTy);
2923 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2924 } else if (IsX86 && Name.starts_with("avx512.mask.compress.store.")) {
2925 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2926 Type *PtrTy = ResultTy->getElementType();
2927
2928 // Cast the pointer to element type.
2929 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2930 llvm::PointerType::getUnqual(PtrTy));
2931
2932 Value *MaskVec =
2933 getX86MaskVec(Builder, CI->getArgOperand(2),
2934 cast<FixedVectorType>(ResultTy)->getNumElements());
2935
2936 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2937 Intrinsic::masked_compressstore,
2938 ResultTy);
2939 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2940 } else if (IsX86 && (Name.starts_with("avx512.mask.compress.") ||
2941 Name.starts_with("avx512.mask.expand."))) {
2942 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2943
2944 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2945 ResultTy->getNumElements());
2946
2947 bool IsCompress = Name[12] == 'c';
2948 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2949 : Intrinsic::x86_avx512_mask_expand;
2950 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2951 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2952 MaskVec });
2953 } else if (IsX86 && Name.starts_with("xop.vpcom")) {
2954 bool IsSigned;
2955 if (Name.ends_with("ub") || Name.ends_with("uw") || Name.ends_with("ud") ||
2956 Name.ends_with("uq"))
2957 IsSigned = false;
2958 else if (Name.ends_with("b") || Name.ends_with("w") || Name.ends_with("d") ||
2959 Name.ends_with("q"))
2960 IsSigned = true;
2961 else
2962 llvm_unreachable("Unknown suffix");
2963
2964 unsigned Imm;
2965 if (CI->arg_size() == 3) {
2966 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2967 } else {
2968 Name = Name.substr(9); // strip off "xop.vpcom"
2969 if (Name.starts_with("lt"))
2970 Imm = 0;
2971 else if (Name.starts_with("le"))
2972 Imm = 1;
2973 else if (Name.starts_with("gt"))
2974 Imm = 2;
2975 else if (Name.starts_with("ge"))
2976 Imm = 3;
2977 else if (Name.starts_with("eq"))
2978 Imm = 4;
2979 else if (Name.starts_with("ne"))
2980 Imm = 5;
2981 else if (Name.starts_with("false"))
2982 Imm = 6;
2983 else if (Name.starts_with("true"))
2984 Imm = 7;
2985 else
2986 llvm_unreachable("Unknown condition");
2987 }
2988
2989 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2990 } else if (IsX86 && Name.starts_with("xop.vpcmov")) {
2991 Value *Sel = CI->getArgOperand(2);
2992 Value *NotSel = Builder.CreateNot(Sel);
2993 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
2994 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
2995 Rep = Builder.CreateOr(Sel0, Sel1);
2996 } else if (IsX86 && (Name.starts_with("xop.vprot") ||
2997 Name.starts_with("avx512.prol") ||
2998 Name.starts_with("avx512.mask.prol"))) {
2999 Rep = upgradeX86Rotate(Builder, *CI, false);
3000 } else if (IsX86 && (Name.starts_with("avx512.pror") ||
3001 Name.starts_with("avx512.mask.pror"))) {
3002 Rep = upgradeX86Rotate(Builder, *CI, true);
3003 } else if (IsX86 && (Name.starts_with("avx512.vpshld.") ||
3004 Name.starts_with("avx512.mask.vpshld") ||
3005 Name.starts_with("avx512.maskz.vpshld"))) {
3006 bool ZeroMask = Name[11] == 'z';
3007 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
3008 } else if (IsX86 && (Name.starts_with("avx512.vpshrd.") ||
3009 Name.starts_with("avx512.mask.vpshrd") ||
3010 Name.starts_with("avx512.maskz.vpshrd"))) {
3011 bool ZeroMask = Name[11] == 'z';
3012 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
3013 } else if (IsX86 && Name == "sse42.crc32.64.8") {
3014 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
3015 Intrinsic::x86_sse42_crc32_32_8);
3016 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
3017 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
3018 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
3019 } else if (IsX86 && (Name.starts_with("avx.vbroadcast.s") ||
3020 Name.starts_with("avx512.vbroadcast.s"))) {
3021 // Replace broadcasts with a series of insertelements.
3022 auto *VecTy = cast<FixedVectorType>(CI->getType());
3023 Type *EltTy = VecTy->getElementType();
3024 unsigned EltNum = VecTy->getNumElements();
3025 Value *Load = Builder.CreateLoad(EltTy, CI->getArgOperand(0));
3026 Type *I32Ty = Type::getInt32Ty(C);
3027 Rep = PoisonValue::get(VecTy);
3028 for (unsigned I = 0; I < EltNum; ++I)
3029 Rep = Builder.CreateInsertElement(Rep, Load,
3030 ConstantInt::get(I32Ty, I));
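// For example, "avx.vbroadcast.ss.256" becomes a scalar float load plus
// eight insertelement instructions splatting the loaded value into every
// lane of the <8 x float> result.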
3031 } else if (IsX86 && (Name.starts_with("sse41.pmovsx") ||
3032 Name.starts_with("sse41.pmovzx") ||
3033 Name.starts_with("avx2.pmovsx") ||
3034 Name.starts_with("avx2.pmovzx") ||
3035 Name.starts_with("avx512.mask.pmovsx") ||
3036 Name.starts_with("avx512.mask.pmovzx"))) {
3037 auto *DstTy = cast<FixedVectorType>(CI->getType());
3038 unsigned NumDstElts = DstTy->getNumElements();
3039
3040 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
3041 SmallVector<int, 8> ShuffleMask(NumDstElts);
3042 for (unsigned i = 0; i != NumDstElts; ++i)
3043 ShuffleMask[i] = i;
3044
3045 Value *SV =
3046 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
3047
3048 bool DoSext = Name.contains("pmovsx");
3049 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
3050 : Builder.CreateZExt(SV, DstTy);
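// For example, "sse41.pmovsxbw" on a <16 x i8> source roughly becomes:
//   %sv = shufflevector <16 x i8> %x, <16 x i8> poison,
//                       <8 x i32> <i32 0, ..., i32 7>
//   %r = sext <8 x i8> %sv to <8 x i16>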
3051 // If there are 3 arguments, it's a masked intrinsic so we need a select.
3052 if (CI->arg_size() == 3)
3053 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3054 CI->getArgOperand(1));
3055 } else if (Name == "avx512.mask.pmov.qd.256" ||
3056 Name == "avx512.mask.pmov.qd.512" ||
3057 Name == "avx512.mask.pmov.wb.256" ||
3058 Name == "avx512.mask.pmov.wb.512") {
3059 Type *Ty = CI->getArgOperand(1)->getType();
3060 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
3061 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3062 CI->getArgOperand(1));
3063 } else if (IsX86 && (Name.starts_with("avx.vbroadcastf128") ||
3064 Name == "avx2.vbroadcasti128")) {
3065 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
3066 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
3067 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
3068 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
3069 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
3070 PointerType::getUnqual(VT));
3071 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
3072 if (NumSrcElts == 2)
3073 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
3074 else
3075 Rep = Builder.CreateShuffleVector(
3076 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
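// For example, "avx.vbroadcastf128.ps.256" becomes an align-1 <4 x float>
// load followed by a shuffle with mask <0, 1, 2, 3, 0, 1, 2, 3> that
// repeats the 128-bit payload in both halves of the result.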
3077 } else if (IsX86 && (Name.starts_with("avx512.mask.shuf.i") ||
3078 Name.starts_with("avx512.mask.shuf.f"))) {
3079 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3080 Type *VT = CI->getType();
3081 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
3082 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
3083 unsigned ControlBitsMask = NumLanes - 1;
3084 unsigned NumControlBits = NumLanes / 2;
3085 SmallVector<int, 8> ShuffleMask(0);
3086
3087 for (unsigned l = 0; l != NumLanes; ++l) {
3088 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
3089 // We actually need the other source.
3090 if (l >= NumLanes / 2)
3091 LaneMask += NumLanes;
3092 for (unsigned i = 0; i != NumElementsInLane; ++i)
3093 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
3094 }
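// For example, "avx512.mask.shuf.i32x4" (512-bit, four 128-bit lanes) with
// Imm = 0x44 picks lanes 0 and 1 of the first source, then lanes 0 and 1
// of the second, i.e. lane selectors <0, 1, 4, 5> in the mask just built.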
3095 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
3096 CI->getArgOperand(1), ShuffleMask);
3097 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3098 CI->getArgOperand(3));
3099 } else if (IsX86 && (Name.starts_with("avx512.mask.broadcastf") ||
3100 Name.starts_with("avx512.mask.broadcasti"))) {
3101 unsigned NumSrcElts =
3102 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
3103 ->getNumElements();
3104 unsigned NumDstElts =
3105 cast<FixedVectorType>(CI->getType())->getNumElements();
3106
3107 SmallVector<int, 8> ShuffleMask(NumDstElts);
3108 for (unsigned i = 0; i != NumDstElts; ++i)
3109 ShuffleMask[i] = i % NumSrcElts;
3110
3111 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
3112 CI->getArgOperand(0),
3113 ShuffleMask);
3114 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3115 CI->getArgOperand(1));
3116 } else if (IsX86 && (Name.starts_with("avx2.pbroadcast") ||
3117 Name.starts_with("avx2.vbroadcast") ||
3118 Name.starts_with("avx512.pbroadcast") ||
3119 Name.starts_with("avx512.mask.broadcast.s"))) {
3120 // Replace vp?broadcasts with a vector shuffle.
3121 Value *Op = CI->getArgOperand(0);
3122 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
3123 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
3124 SmallVector<int, 8> M;
3125 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
3126 Rep = Builder.CreateShuffleVector(Op, M);
3127
3128 if (CI->arg_size() == 3)
3129 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3130 CI->getArgOperand(1));
3131 } else if (IsX86 && (Name.starts_with("sse2.padds.") ||
3132 Name.starts_with("avx2.padds.") ||
3133 Name.starts_with("avx512.padds.") ||
3134 Name.starts_with("avx512.mask.padds."))) {
3135 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
3136 } else if (IsX86 && (Name.starts_with("sse2.psubs.") ||
3137 Name.starts_with("avx2.psubs.") ||
3138 Name.starts_with("avx512.psubs.") ||
3139 Name.starts_with("avx512.mask.psubs."))) {
3140 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
3141 } else if (IsX86 && (Name.starts_with("sse2.paddus.") ||
3142 Name.starts_with("avx2.paddus.") ||
3143 Name.starts_with("avx512.mask.paddus."))) {
3144 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
3145 } else if (IsX86 && (Name.starts_with("sse2.psubus.") ||
3146 Name.starts_with("avx2.psubus.") ||
3147 Name.starts_with("avx512.mask.psubus."))) {
3148 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
3149 } else if (IsX86 && Name.starts_with("avx512.mask.palignr.")) {
3150 Rep = upgradeX86ALIGNIntrinsics(
3151 Builder, CI->getArgOperand(0), CI->getArgOperand(1),
3152 CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
3153 false);
3154 } else if (IsX86 && Name.starts_with("avx512.mask.valign.")) {
3155 Rep = upgradeX86ALIGNIntrinsics(
3156 Builder, CI->getArgOperand(0), CI->getArgOperand(1),
3157 CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
3158 true);
3159 } else if (IsX86 && (Name == "sse2.psll.dq" ||
3160 Name == "avx2.psll.dq")) {
3161 // 128/256-bit shift left specified in bits.
3162 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3163 Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
3164 Shift / 8); // Shift is in bits.
3165 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
3166 Name == "avx2.psrl.dq")) {
3167 // 128/256-bit shift right specified in bits.
3168 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3169 Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
3170 Shift / 8); // Shift is in bits.
3171 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
3172 Name == "avx2.psll.dq.bs" ||
3173 Name == "avx512.psll.dq.512")) {
3174 // 128/256/512-bit shift left specified in bytes.
3175 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3176 Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
3177 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
3178 Name == "avx2.psrl.dq.bs" ||
3179 Name == "avx512.psrl.dq.512")) {
3180 // 128/256/512-bit shift right specified in bytes.
3181 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3182 Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
3183 } else if (IsX86 && (Name == "sse41.pblendw" ||
3184 Name.starts_with("sse41.blendp") ||
3185 Name.starts_with("avx.blend.p") ||
3186 Name == "avx2.pblendw" ||
3187 Name.starts_with("avx2.pblendd."))) {
3188 Value *Op0 = CI->getArgOperand(0);
3189 Value *Op1 = CI->getArgOperand(1);
3190 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3191 auto *VecTy = cast<FixedVectorType>(CI->getType());
3192 unsigned NumElts = VecTy->getNumElements();
3193
3194 SmallVector<int, 16> Idxs(NumElts);
3195 for (unsigned i = 0; i != NumElts; ++i)
3196 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
3197
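// For example, "sse41.pblendw" on <8 x i16> with Imm = 0x0F takes the low
// four elements from Op1 and the rest from Op0, i.e. shuffle mask
// <8, 9, 10, 11, 4, 5, 6, 7>.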
3198 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3199 } else if (IsX86 && (Name.starts_with("avx.vinsertf128.") ||
3200 Name == "avx2.vinserti128" ||
3201 Name.starts_with("avx512.mask.insert"))) {
3202 Value *Op0 = CI->getArgOperand(0);
3203 Value *Op1 = CI->getArgOperand(1);
3204 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3205 unsigned DstNumElts =
3206 cast<FixedVectorType>(CI->getType())->getNumElements();
3207 unsigned SrcNumElts =
3208 cast<FixedVectorType>(Op1->getType())->getNumElements();
3209 unsigned Scale = DstNumElts / SrcNumElts;
3210
3211 // Mask off the high bits of the immediate value; hardware ignores those.
3212 Imm = Imm % Scale;
3213
3214 // Extend the second operand into a vector the size of the destination.
3215 SmallVector<int, 8> Idxs(DstNumElts);
3216 for (unsigned i = 0; i != SrcNumElts; ++i)
3217 Idxs[i] = i;
3218 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
3219 Idxs[i] = SrcNumElts;
3220 Rep = Builder.CreateShuffleVector(Op1, Idxs);
3221
3222 // Insert the second operand into the first operand.
3223
3224 // Note that there is no guarantee that instruction lowering will actually
3225 // produce a vinsertf128 instruction for the created shuffles. In
3226 // particular, the 0 immediate case involves no lane changes, so it can
3227 // be handled as a blend.
3228
3229 // Example of shuffle mask for 32-bit elements:
3230 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
3231 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
3232
3233 // First fill with the identity mask.
3234 for (unsigned i = 0; i != DstNumElts; ++i)
3235 Idxs[i] = i;
3236 // Then replace the elements where we need to insert.
3237 for (unsigned i = 0; i != SrcNumElts; ++i)
3238 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
3239 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
3240
3241 // If the intrinsic has a mask operand, handle that.
3242 if (CI->arg_size() == 5)
3243 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3244 CI->getArgOperand(3));
3245 } else if (IsX86 && (Name.starts_with("avx.vextractf128.") ||
3246 Name == "avx2.vextracti128" ||
3247 Name.starts_with("avx512.mask.vextract"))) {
3248 Value *Op0 = CI->getArgOperand(0);
3249 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3250 unsigned DstNumElts =
3251 cast<FixedVectorType>(CI->getType())->getNumElements();
3252 unsigned SrcNumElts =
3253 cast<FixedVectorType>(Op0->getType())->getNumElements();
3254 unsigned Scale = SrcNumElts / DstNumElts;
3255
3256 // Mask off the high bits of the immediate value; hardware ignores those.
3257 Imm = Imm % Scale;
3258
3259 // Get indexes for the subvector of the input vector.
3260 SmallVector<int, 8> Idxs(DstNumElts);
3261 for (unsigned i = 0; i != DstNumElts; ++i) {
3262 Idxs[i] = i + (Imm * DstNumElts);
3263 }
3264 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3265
3266 // If the intrinsic has a mask operand, handle that.
3267 if (CI->arg_size() == 4)
3268 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3269 CI->getArgOperand(2));
3270 } else if (!IsX86 && Name == "stackprotectorcheck") {
3271 Rep = nullptr;
3272 } else if (IsX86 && (Name.starts_with("avx512.mask.perm.df.") ||
3273 Name.starts_with("avx512.mask.perm.di."))) {
3274 Value *Op0 = CI->getArgOperand(0);
3275 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3276 auto *VecTy = cast<FixedVectorType>(CI->getType());
3277 unsigned NumElts = VecTy->getNumElements();
3278
3279 SmallVector<int, 8> Idxs(NumElts);
3280 for (unsigned i = 0; i != NumElts; ++i)
3281 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
3282
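// For example, "avx512.mask.perm.df.256" with Imm = 0x4E (binary 01001110)
// decodes two bits per element into the mask <2, 3, 0, 1>, swapping the
// two 128-bit halves of the <4 x double> source.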
3283 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3284
3285 if (CI->arg_size() == 4)
3286 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3287 CI->getArgOperand(2));
3288 } else if (IsX86 && (Name.starts_with("avx.vperm2f128.") ||
3289 Name == "avx2.vperm2i128")) {
3290 // The immediate permute control byte looks like this:
3291 // [1:0] - select 128 bits from sources for low half of destination
3292 // [2] - ignore
3293 // [3] - zero low half of destination
3294 // [5:4] - select 128 bits from sources for high half of destination
3295 // [6] - ignore
3296 // [7] - zero high half of destination
3297
3298 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3299
3300 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3301 unsigned HalfSize = NumElts / 2;
3302 SmallVector<int, 8> ShuffleMask(NumElts);
3303
3304 // Determine which operand(s) are actually in use for this instruction.
3305 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
3306 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
3307
3308 // If needed, replace operands based on zero mask.
3309 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
3310 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
3311
3312 // Permute low half of result.
3313 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
3314 for (unsigned i = 0; i < HalfSize; ++i)
3315 ShuffleMask[i] = StartIndex + i;
3316
3317 // Permute high half of result.
3318 StartIndex = (Imm & 0x10) ? HalfSize : 0;
3319 for (unsigned i = 0; i < HalfSize; ++i)
3320 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
3321
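// For example, Imm = 0x31 on <8 x float> sources keeps the high half of
// the first source in the low half of the result and the high half of the
// second source in the high half, i.e. V0 = arg0, V1 = arg1 and shuffle
// mask <4, 5, 6, 7, 12, 13, 14, 15>.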
3322 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
3323
3324 } else if (IsX86 && (Name.starts_with("avx.vpermil.") ||
3325 Name == "sse2.pshuf.d" ||
3326 Name.starts_with("avx512.mask.vpermil.p") ||
3327 Name.starts_with("avx512.mask.pshuf.d."))) {
3328 Value *Op0 = CI->getArgOperand(0);
3329 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3330 auto *VecTy = cast<FixedVectorType>(CI->getType());
3331 unsigned NumElts = VecTy->getNumElements();
3332 // Calculate the size of each index in the immediate.
3333 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
3334 unsigned IdxMask = ((1 << IdxSize) - 1);
3335
3336 SmallVector<int, 8> Idxs(NumElts);
3337 // Look up the bits for this element, wrapping around the immediate
3338 // every 8 bits. Elements are grouped into sets of 2 or 4 elements so we
3339 // need to offset by the first index of each group.
3340 for (unsigned i = 0; i != NumElts; ++i)
3341 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
3342
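// For example, "sse2.pshuf.d" with Imm = 0x1B (binary 00011011) decodes to
// the mask <3, 2, 1, 0>, reversing the four 32-bit elements.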
3343 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3344
3345 if (CI->arg_size() == 4)
3346 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3347 CI->getArgOperand(2));
3348 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
3349 Name.starts_with("avx512.mask.pshufl.w."))) {
3350 Value *Op0 = CI->getArgOperand(0);
3351 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3352 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3353
3354 SmallVector<int, 16> Idxs(NumElts);
3355 for (unsigned l = 0; l != NumElts; l += 8) {
3356 for (unsigned i = 0; i != 4; ++i)
3357 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
3358 for (unsigned i = 4; i != 8; ++i)
3359 Idxs[i + l] = i + l;
3360 }
3361
3362 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3363
3364 if (CI->arg_size() == 4)
3365 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3366 CI->getArgOperand(2));
3367 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
3368 Name.starts_with("avx512.mask.pshufh.w."))) {
3369 Value *Op0 = CI->getArgOperand(0);
3370 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3371 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3372
3373 SmallVector<int, 16> Idxs(NumElts);
3374 for (unsigned l = 0; l != NumElts; l += 8) {
3375 for (unsigned i = 0; i != 4; ++i)
3376 Idxs[i + l] = i + l;
3377 for (unsigned i = 0; i != 4; ++i)
3378 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
3379 }
3380
3381 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3382
3383 if (CI->arg_size() == 4)
3384 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3385 CI->getArgOperand(2));
3386 } else if (IsX86 && Name.starts_with("avx512.mask.shuf.p")) {
3387 Value *Op0 = CI->getArgOperand(0);
3388 Value *Op1 = CI->getArgOperand(1);
3389 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3390 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3391
3392 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3393 unsigned HalfLaneElts = NumLaneElts / 2;
3394
3395 SmallVector<int, 16> Idxs(NumElts);
3396 for (unsigned i = 0; i != NumElts; ++i) {
3397 // Base index is the starting element of the lane.
3398 Idxs[i] = i - (i % NumLaneElts);
3399 // If we are halfway through the lane, switch to the other source.
3400 if ((i % NumLaneElts) >= HalfLaneElts)
3401 Idxs[i] += NumElts;
3402 // Now select the specific element by adding HalfLaneElts bits from
3403 // the immediate, wrapping around the immediate every 8 bits.
3404 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
3405 }
3406
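// For example, "avx512.mask.shuf.ps.128" with Imm = 0x1B selects elements
// 3 and 2 from the first source and elements 1 and 0 from the second,
// i.e. shuffle mask <3, 2, 5, 4>.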
3407 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3408
3409 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3410 CI->getArgOperand(3));
3411 } else if (IsX86 && (Name.starts_with("avx512.mask.movddup") ||
3412 Name.starts_with("avx512.mask.movshdup") ||
3413 Name.starts_with("avx512.mask.movsldup"))) {
3414 Value *Op0 = CI->getArgOperand(0);
3415 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3416 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3417
3418 unsigned Offset = 0;
3419 if (Name.starts_with("avx512.mask.movshdup."))
3420 Offset = 1;
3421
3422 SmallVector<int, 16> Idxs(NumElts);
3423 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
3424 for (unsigned i = 0; i != NumLaneElts; i += 2) {
3425 Idxs[i + l + 0] = i + l + Offset;
3426 Idxs[i + l + 1] = i + l + Offset;
3427 }
3428
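// For example, movshdup on <4 x float> duplicates the odd elements, giving
// the mask <1, 1, 3, 3>; movsldup (Offset = 0) would give <0, 0, 2, 2>.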
3429 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3430
3431 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3432 CI->getArgOperand(1));
3433 } else if (IsX86 && (Name.starts_with("avx512.mask.punpckl") ||
3434 Name.starts_with("avx512.mask.unpckl."))) {
3435 Value *Op0 = CI->getArgOperand(0);
3436 Value *Op1 = CI->getArgOperand(1);
3437 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3438 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3439
3440 SmallVector<int, 64> Idxs(NumElts);
3441 for (int l = 0; l != NumElts; l += NumLaneElts)
3442 for (int i = 0; i != NumLaneElts; ++i)
3443 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
3444
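// For example, a 128-bit punpckl on <4 x i32> interleaves the low halves
// of both sources: mask <0, 4, 1, 5>. The punpckh case below is the same
// computation offset by half a lane, giving <2, 6, 3, 7>.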
3445 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3446
3447 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3448 CI->getArgOperand(2));
3449 } else if (IsX86 && (Name.starts_with("avx512.mask.punpckh") ||
3450 Name.starts_with("avx512.mask.unpckh."))) {
3451 Value *Op0 = CI->getArgOperand(0);
3452 Value *Op1 = CI->getArgOperand(1);
3453 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3454 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3455
3456 SmallVector<int, 64> Idxs(NumElts);
3457 for (int l = 0; l != NumElts; l += NumLaneElts)
3458 for (int i = 0; i != NumLaneElts; ++i)
3459 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
3460
3461 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3462
3463 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3464 CI->getArgOperand(2));
3465 } else if (IsX86 && (Name.starts_with("avx512.mask.and.") ||
3466 Name.starts_with("avx512.mask.pand."))) {
3467 VectorType *FTy = cast<VectorType>(CI->getType());
3468 VectorType *ITy = VectorType::getInteger(FTy);
3469 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3470 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3471 Rep = Builder.CreateBitCast(Rep, FTy);
3472 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3473 CI->getArgOperand(2));
3474 } else if (IsX86 && (Name.starts_with("avx512.mask.andn.") ||
3475 Name.starts_with("avx512.mask.pandn."))) {
3476 VectorType *FTy = cast<VectorType>(CI->getType());
3477 VectorType *ITy = VectorType::getInteger(FTy);
3478 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
3479 Rep = Builder.CreateAnd(Rep,
3480 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3481 Rep = Builder.CreateBitCast(Rep, FTy);
3482 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3483 CI->getArgOperand(2));
3484 } else if (IsX86 && (Name.starts_with("avx512.mask.or.") ||
3485 Name.starts_with("avx512.mask.por."))) {
3486 VectorType *FTy = cast<VectorType>(CI->getType());
3487 VectorType *ITy = VectorType::getInteger(FTy);
3488 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3489 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3490 Rep = Builder.CreateBitCast(Rep, FTy);
3491 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3492 CI->getArgOperand(2));
3493 } else if (IsX86 && (Name.starts_with("avx512.mask.xor.") ||
3494 Name.starts_with("avx512.mask.pxor."))) {
3495 VectorType *FTy = cast<VectorType>(CI->getType());
3496 VectorType *ITy = VectorType::getInteger(FTy);
3497 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3498 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3499 Rep = Builder.CreateBitCast(Rep, FTy);
3500 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3501 CI->getArgOperand(2));
3502 } else if (IsX86 && Name.starts_with("avx512.mask.padd.")) {
3503 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3504 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3505 CI->getArgOperand(2));
3506 } else if (IsX86 && Name.starts_with("avx512.mask.psub.")) {
3507 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3508 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3509 CI->getArgOperand(2));
3510 } else if (IsX86 && Name.starts_with("avx512.mask.pmull.")) {
3511 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3512 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3513 CI->getArgOperand(2));
3514 } else if (IsX86 && Name.starts_with("avx512.mask.add.p")) {
3515 if (Name.ends_with(".512")) {
3516 Intrinsic::ID IID;
3517 if (Name[17] == 's')
3518 IID = Intrinsic::x86_avx512_add_ps_512;
3519 else
3520 IID = Intrinsic::x86_avx512_add_pd_512;
3521
3522 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3523 { CI->getArgOperand(0), CI->getArgOperand(1),
3524 CI->getArgOperand(4) });
3525 } else {
3526 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3527 }
3528 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3529 CI->getArgOperand(2));
3530 } else if (IsX86 && Name.starts_with("avx512.mask.div.p")) {
3531 if (Name.ends_with(".512")) {
3532 Intrinsic::ID IID;
3533 if (Name[17] == 's')
3534 IID = Intrinsic::x86_avx512_div_ps_512;
3535 else
3536 IID = Intrinsic::x86_avx512_div_pd_512;
3537
3538 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3539 { CI->getArgOperand(0), CI->getArgOperand(1),
3540 CI->getArgOperand(4) });
3541 } else {
3542 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3543 }
3544 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3545 CI->getArgOperand(2));
3546 } else if (IsX86 && Name.starts_with("avx512.mask.mul.p")) {
3547 if (Name.ends_with(".512")) {
3548 Intrinsic::ID IID;
3549 if (Name[17] == 's')
3550 IID = Intrinsic::x86_avx512_mul_ps_512;
3551 else
3552 IID = Intrinsic::x86_avx512_mul_pd_512;
3553
3554 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3555 { CI->getArgOperand(0), CI->getArgOperand(1),
3556 CI->getArgOperand(4) });
3557 } else {
3558 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3559 }
3560 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3561 CI->getArgOperand(2));
3562 } else if (IsX86 && Name.starts_with("avx512.mask.sub.p")) {
3563 if (Name.ends_with(".512")) {
3564 Intrinsic::ID IID;
3565 if (Name[17] == 's')
3566 IID = Intrinsic::x86_avx512_sub_ps_512;
3567 else
3568 IID = Intrinsic::x86_avx512_sub_pd_512;
3569
3570 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3571 { CI->getArgOperand(0), CI->getArgOperand(1),
3572 CI->getArgOperand(4) });
3573 } else {
3574 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3575 }
3576 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3577 CI->getArgOperand(2));
3578 } else if (IsX86 && (Name.starts_with("avx512.mask.max.p") ||
3579 Name.starts_with("avx512.mask.min.p")) &&
3580 Name.drop_front(18) == ".512") {
3581 bool IsDouble = Name[17] == 'd';
3582 bool IsMin = Name[13] == 'i';
3583 static const Intrinsic::ID MinMaxTbl[2][2] = {
3584 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3585 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3586 };
3587 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3588
3589 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3590 { CI->getArgOperand(0), CI->getArgOperand(1),
3591 CI->getArgOperand(4) });
3592 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3593 CI->getArgOperand(2));
3594 } else if (IsX86 && Name.starts_with("avx512.mask.lzcnt.")) {
3595 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3596 Intrinsic::ctlz,
3597 CI->getType()),
3598 { CI->getArgOperand(0), Builder.getInt1(false) });
3599 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3600 CI->getArgOperand(1));
3601 } else if (IsX86 && Name.starts_with("avx512.mask.psll")) {
3602 bool IsImmediate = Name[16] == 'i' ||
3603 (Name.size() > 18 && Name[18] == 'i');
3604 bool IsVariable = Name[16] == 'v';
3605 char Size = Name[16] == '.' ? Name[17] :
3606 Name[17] == '.' ? Name[18] :
3607 Name[18] == '.' ? Name[19] :
3608 Name[20];
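// For example, "avx512.mask.psll.d.128" has '.' at index 16, so Size is
// Name[17] == 'd'; "avx512.mask.psll.di.512" additionally has 'i' at index
// 18, marking the immediate form; "avx512.mask.psllv8.si" has 'v' at index
// 16, marking the variable-shift form.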
3609
3610 Intrinsic::ID IID;
3611 if (IsVariable && Name[17] != '.') {
3612 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3613 IID = Intrinsic::x86_avx2_psllv_q;
3614 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3615 IID = Intrinsic::x86_avx2_psllv_q_256;
3616 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3617 IID = Intrinsic::x86_avx2_psllv_d;
3618 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3619 IID = Intrinsic::x86_avx2_psllv_d_256;
3620 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3621 IID = Intrinsic::x86_avx512_psllv_w_128;
3622 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3623 IID = Intrinsic::x86_avx512_psllv_w_256;
3624 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3625 IID = Intrinsic::x86_avx512_psllv_w_512;
3626 else
3627 llvm_unreachable("Unexpected size");
3628 } else if (Name.ends_with(".128")) {
3629 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3630 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3631 : Intrinsic::x86_sse2_psll_d;
3632 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3633 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3634 : Intrinsic::x86_sse2_psll_q;
3635 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3636 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3637 : Intrinsic::x86_sse2_psll_w;
3638 else
3639 llvm_unreachable("Unexpected size");
3640 } else if (Name.ends_with(".256")) {
3641 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3642 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3643 : Intrinsic::x86_avx2_psll_d;
3644 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3645 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3646 : Intrinsic::x86_avx2_psll_q;
3647 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3648 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3649 : Intrinsic::x86_avx2_psll_w;
3650 else
3651 llvm_unreachable("Unexpected size");
3652 } else {
3653 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3654 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3655 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3656 Intrinsic::x86_avx512_psll_d_512;
3657 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3658 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3659 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3660 Intrinsic::x86_avx512_psll_q_512;
3661 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3662 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3663 : Intrinsic::x86_avx512_psll_w_512;
3664 else
3665 llvm_unreachable("Unexpected size");
3666 }
3667
3668 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3669 } else if (IsX86 && Name.starts_with("avx512.mask.psrl")) {
3670 bool IsImmediate = Name[16] == 'i' ||
3671 (Name.size() > 18 && Name[18] == 'i');
3672 bool IsVariable = Name[16] == 'v';
3673 char Size = Name[16] == '.' ? Name[17] :
3674 Name[17] == '.' ? Name[18] :
3675 Name[18] == '.' ? Name[19] :
3676 Name[20];
3677
3678 Intrinsic::ID IID;
3679 if (IsVariable && Name[17] != '.') {
3680 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3681 IID = Intrinsic::x86_avx2_psrlv_q;
3682 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3683 IID = Intrinsic::x86_avx2_psrlv_q_256;
3684 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3685 IID = Intrinsic::x86_avx2_psrlv_d;
3686 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3687 IID = Intrinsic::x86_avx2_psrlv_d_256;
3688 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3689 IID = Intrinsic::x86_avx512_psrlv_w_128;
3690 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3691 IID = Intrinsic::x86_avx512_psrlv_w_256;
3692 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3693 IID = Intrinsic::x86_avx512_psrlv_w_512;
3694 else
3695 llvm_unreachable("Unexpected size");
3696 } else if (Name.ends_with(".128")) {
3697 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3698 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3699 : Intrinsic::x86_sse2_psrl_d;
3700 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3701 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3702 : Intrinsic::x86_sse2_psrl_q;
3703 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3704 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3705 : Intrinsic::x86_sse2_psrl_w;
3706 else
3707 llvm_unreachable("Unexpected size");
3708 } else if (Name.ends_with(".256")) {
3709 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3710 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3711 : Intrinsic::x86_avx2_psrl_d;
3712 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3713 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3714 : Intrinsic::x86_avx2_psrl_q;
3715 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3716 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3717 : Intrinsic::x86_avx2_psrl_w;
3718 else
3719 llvm_unreachable("Unexpected size");
3720 } else {
3721 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrlv.d.512
3722 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3723 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3724 Intrinsic::x86_avx512_psrl_d_512;
3725 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrlv.q.512
3726 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3727 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3728 Intrinsic::x86_avx512_psrl_q_512;
3729 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
3730 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3731 : Intrinsic::x86_avx512_psrl_w_512;
3732 else
3733 llvm_unreachable("Unexpected size");
3734 }
3735
3736 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3737 } else if (IsX86 && Name.starts_with("avx512.mask.psra")) {
3738 bool IsImmediate = Name[16] == 'i' ||
3739 (Name.size() > 18 && Name[18] == 'i');
3740 bool IsVariable = Name[16] == 'v';
3741 char Size = Name[16] == '.' ? Name[17] :
3742 Name[17] == '.' ? Name[18] :
3743 Name[18] == '.' ? Name[19] :
3744 Name[20];
3745
3746 Intrinsic::ID IID;
3747 if (IsVariable && Name[17] != '.') {
3748 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3749 IID = Intrinsic::x86_avx2_psrav_d;
3750 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3751 IID = Intrinsic::x86_avx2_psrav_d_256;
3752 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3753 IID = Intrinsic::x86_avx512_psrav_w_128;
3754 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3755 IID = Intrinsic::x86_avx512_psrav_w_256;
3756 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3757 IID = Intrinsic::x86_avx512_psrav_w_512;
3758 else
3759 llvm_unreachable("Unexpected size");
3760 } else if (Name.ends_with(".128")) {
3761 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3762 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3763 : Intrinsic::x86_sse2_psra_d;
3764 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3765 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3766 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3767 Intrinsic::x86_avx512_psra_q_128;
3768 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3769 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3770 : Intrinsic::x86_sse2_psra_w;
3771 else
3772 llvm_unreachable("Unexpected size");
3773 } else if (Name.ends_with(".256")) {
3774 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3775 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3776 : Intrinsic::x86_avx2_psra_d;
3777 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3778 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3779 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3780 Intrinsic::x86_avx512_psra_q_256;
3781 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3782 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3783 : Intrinsic::x86_avx2_psra_w;
3784 else
3785 llvm_unreachable("Unexpected size");
3786 } else {
3787 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3788 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3789 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3790 Intrinsic::x86_avx512_psra_d_512;
3791 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q, psrav.q.512
3792 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3793 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3794 Intrinsic::x86_avx512_psra_q_512;
3795 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3796 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3797 : Intrinsic::x86_avx512_psra_w_512;
3798 else
3799 llvm_unreachable("Unexpected size");
3800 }
3801
3802 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3803 } else if (IsX86 && Name.starts_with("avx512.mask.move.s")) {
3804 Rep = upgradeMaskedMove(Builder, *CI);
3805 } else if (IsX86 && Name.starts_with("avx512.cvtmask2")) {
3806 Rep = upgradeMaskToInt(Builder, *CI);
3807 } else if (IsX86 && Name.ends_with(".movntdqa")) {
3808 MDNode *Node = MDNode::get(
3809 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3810
3811 Value *Ptr = CI->getArgOperand(0);
3812
3813 // Convert the type of the pointer to a pointer to the stored type.
3814 Value *BC = Builder.CreateBitCast(
3815 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3816 LoadInst *LI = Builder.CreateAlignedLoad(
3817 CI->getType(), BC,
3818 Align(CI->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
3819 LI->setMetadata(LLVMContext::MD_nontemporal, Node);
3820 Rep = LI;
3821 } else if (IsX86 && (Name.starts_with("fma.vfmadd.") ||
3822 Name.starts_with("fma.vfmsub.") ||
3823 Name.starts_with("fma.vfnmadd.") ||
3824 Name.starts_with("fma.vfnmsub."))) {
3825 bool NegMul = Name[6] == 'n';
3826 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3827 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
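// For example, "fma.vfnmsub.sd" has 'n' at index 6 (NegMul), 's' at index
// 8 (NegAcc) and 's' at index 12 (IsScalar), while "fma.vfmadd.ps" sets
// none of the three flags.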
3828
3829 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3830 CI->getArgOperand(2) };
3831
3832 if (IsScalar) {
3833 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3834 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3835 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3836 }
3837
3838 if (NegMul && !IsScalar)
3839 Ops[0] = Builder.CreateFNeg(Ops[0]);
3840 if (NegMul && IsScalar)
3841 Ops[1] = Builder.CreateFNeg(Ops[1]);
3842 if (NegAcc)
3843 Ops[2] = Builder.CreateFNeg(Ops[2]);
3844
3845 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3846 Intrinsic::fma,
3847 Ops[0]->getType()),
3848 Ops);
3849
3850 if (IsScalar)
3851 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3852 (uint64_t)0);
3853 } else if (IsX86 && Name.starts_with("fma4.vfmadd.s")) {
3854 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3855 CI->getArgOperand(2) };
3856
3857 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3858 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3859 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3860
3861 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3862 Intrinsic::fma,
3863 Ops[0]->getType()),
3864 Ops);
3865
3866 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3867 Rep, (uint64_t)0);
3868 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.s") ||
3869 Name.starts_with("avx512.maskz.vfmadd.s") ||
3870 Name.starts_with("avx512.mask3.vfmadd.s") ||
3871 Name.starts_with("avx512.mask3.vfmsub.s") ||
3872 Name.starts_with("avx512.mask3.vfnmsub.s"))) {
3873 bool IsMask3 = Name[11] == '3';
3874 bool IsMaskZ = Name[11] == 'z';
3875 // Drop the "avx512.mask." to make it easier.
3876 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3877 bool NegMul = Name[2] == 'n';
3878 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3879
3880 Value *A = CI->getArgOperand(0);
3881 Value *B = CI->getArgOperand(1);
3882 Value *C = CI->getArgOperand(2);
3883
3884 if (NegMul && (IsMask3 || IsMaskZ))
3885 A = Builder.CreateFNeg(A);
3886 if (NegMul && !(IsMask3 || IsMaskZ))
3887 B = Builder.CreateFNeg(B);
3888 if (NegAcc)
3889 C = Builder.CreateFNeg(C);
3890
3891 A = Builder.CreateExtractElement(A, (uint64_t)0);
3892 B = Builder.CreateExtractElement(B, (uint64_t)0);
3893 C = Builder.CreateExtractElement(C, (uint64_t)0);
3894
3895 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3896 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3897 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3898
3899 Intrinsic::ID IID;
3900 if (Name.back() == 'd')
3901 IID = Intrinsic::x86_avx512_vfmadd_f64;
3902 else
3903 IID = Intrinsic::x86_avx512_vfmadd_f32;
3904 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3905 Rep = Builder.CreateCall(FMA, Ops);
3906 } else {
3907 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3908 Intrinsic::fma,
3909 A->getType());
3910 Rep = Builder.CreateCall(FMA, { A, B, C });
3911 }
3912
3913 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3914 IsMask3 ? C : A;
3915
3916 // For Mask3 with NegAcc, we need to create a new extractelement that
3917 // avoids the negation above.
3918 if (NegAcc && IsMask3)
3919 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3920 (uint64_t)0);
3921
3922 Rep = emitX86ScalarSelect(Builder, CI->getArgOperand(3), Rep, PassThru);
3923 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3924 Rep, (uint64_t)0);
3925 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.p") ||
3926 Name.starts_with("avx512.mask.vfnmadd.p") ||
3927 Name.starts_with("avx512.mask.vfnmsub.p") ||
3928 Name.starts_with("avx512.mask3.vfmadd.p") ||
3929 Name.starts_with("avx512.mask3.vfmsub.p") ||
3930 Name.starts_with("avx512.mask3.vfnmsub.p") ||
3931 Name.starts_with("avx512.maskz.vfmadd.p"))) {
3932 bool IsMask3 = Name[11] == '3';
3933 bool IsMaskZ = Name[11] == 'z';
3934 // Drop the "avx512.mask." to make it easier.
3935 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3936 bool NegMul = Name[2] == 'n';
3937 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3938
3939 Value *A = CI->getArgOperand(0);
3940 Value *B = CI->getArgOperand(1);
3941 Value *C = CI->getArgOperand(2);
3942
3943 if (NegMul && (IsMask3 || IsMaskZ))
3944 A = Builder.CreateFNeg(A);
3945 if (NegMul && !(IsMask3 || IsMaskZ))
3946 B = Builder.CreateFNeg(B);
3947 if (NegAcc)
3948 C = Builder.CreateFNeg(C);
3949
3950 if (CI->arg_size() == 5 &&
3951 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3952 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3953 Intrinsic::ID IID;
3954 // Check the character before ".512" in the string.
3955 if (Name[Name.size()-5] == 's')
3956 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3957 else
3958 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3959
3960 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3961 { A, B, C, CI->getArgOperand(4) });
3962 } else {
3963 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3964 Intrinsic::fma,
3965 A->getType());
3966 Rep = Builder.CreateCall(FMA, { A, B, C });
3967 }
3968
3969 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3970 IsMask3 ? CI->getArgOperand(2) :
3971 CI->getArgOperand(0);
3972
3973 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3974 } else if (IsX86 && Name.starts_with("fma.vfmsubadd.p")) {
3975 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3976 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3977 Intrinsic::ID IID;
3978 if (VecWidth == 128 && EltWidth == 32)
3979 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3980 else if (VecWidth == 256 && EltWidth == 32)
3981 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3982 else if (VecWidth == 128 && EltWidth == 64)
3983 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3984 else if (VecWidth == 256 && EltWidth == 64)
3985 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3986 else
3987 llvm_unreachable("Unexpected intrinsic");
3988
3989 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3990 CI->getArgOperand(2) };
3991 Ops[2] = Builder.CreateFNeg(Ops[2]);
3992 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3993 Ops);
3994 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmaddsub.p") ||
3995 Name.starts_with("avx512.mask3.vfmaddsub.p") ||
3996 Name.starts_with("avx512.maskz.vfmaddsub.p") ||
3997 Name.starts_with("avx512.mask3.vfmsubadd.p"))) {
3998 bool IsMask3 = Name[11] == '3';
3999 bool IsMaskZ = Name[11] == 'z';
4000 // Drop the "avx512.mask." to make it easier.
4001 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
4002 bool IsSubAdd = Name[3] == 's';
4003 if (CI->arg_size() == 5) {
4004 Intrinsic::ID IID;
4005 // Check the character before ".512" in the string.
4006 if (Name[Name.size()-5] == 's')
4007 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
4008 else
4009 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
4010
4011 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4012 CI->getArgOperand(2), CI->getArgOperand(4) };
4013 if (IsSubAdd)
4014 Ops[2] = Builder.CreateFNeg(Ops[2]);
4015
4016 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
4017 Ops);
4018 } else {
4019 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4020
4021 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4022 CI->getArgOperand(2) };
4023
4024 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
4025 Ops[0]->getType());
4026 Value *Odd = Builder.CreateCall(FMA, Ops);
4027 Ops[2] = Builder.CreateFNeg(Ops[2]);
4028 Value *Even = Builder.CreateCall(FMA, Ops);
4029
4030 if (IsSubAdd)
4031 std::swap(Even, Odd);
4032
4033 SmallVector<int, 32> Idxs(NumElts);
4034 for (int i = 0; i != NumElts; ++i)
4035 Idxs[i] = i + (i % 2) * NumElts;
4036
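// For example, with 4 elements Idxs is <0, 5, 2, 7>: even result lanes
// come from Even (multiply-subtract) and odd lanes from Odd
// (multiply-add); the swap above inverts this for the subadd forms.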
4037 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
4038 }
4039
4040 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
4041 IsMask3 ? CI->getArgOperand(2) :
4042 CI->getArgOperand(0);
4043
4044 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4045 } else if (IsX86 && (Name.starts_with("avx512.mask.pternlog.") ||
4046 Name.starts_with("avx512.maskz.pternlog."))) {
4047 bool ZeroMask = Name[11] == 'z';
4048 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4049 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
4050 Intrinsic::ID IID;
4051 if (VecWidth == 128 && EltWidth == 32)
4052 IID = Intrinsic::x86_avx512_pternlog_d_128;
4053 else if (VecWidth == 256 && EltWidth == 32)
4054 IID = Intrinsic::x86_avx512_pternlog_d_256;
4055 else if (VecWidth == 512 && EltWidth == 32)
4056 IID = Intrinsic::x86_avx512_pternlog_d_512;
4057 else if (VecWidth == 128 && EltWidth == 64)
4058 IID = Intrinsic::x86_avx512_pternlog_q_128;
4059 else if (VecWidth == 256 && EltWidth == 64)
4060 IID = Intrinsic::x86_avx512_pternlog_q_256;
4061 else if (VecWidth == 512 && EltWidth == 64)
4062 IID = Intrinsic::x86_avx512_pternlog_q_512;
4063 else
4064 llvm_unreachable("Unexpected intrinsic");
4065
4066 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
4067 CI->getArgOperand(2), CI->getArgOperand(3) };
4068 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4069 Args);
4070 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4071 : CI->getArgOperand(0);
4072 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
4073 } else if (IsX86 && (Name.starts_with("avx512.mask.vpmadd52") ||
4074 Name.starts_with("avx512.maskz.vpmadd52"))) {
4075 bool ZeroMask = Name[11] == 'z';
4076 bool High = Name[20] == 'h' || Name[21] == 'h';
4077 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4078 Intrinsic::ID IID;
4079 if (VecWidth == 128 && !High)
4080 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
4081 else if (VecWidth == 256 && !High)
4082 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
4083 else if (VecWidth == 512 && !High)
4084 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
4085 else if (VecWidth == 128 && High)
4086 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
4087 else if (VecWidth == 256 && High)
4088 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
4089 else if (VecWidth == 512 && High)
4090 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
4091 else
4092 llvm_unreachable("Unexpected intrinsic");
4093
4094 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
4095 CI->getArgOperand(2) };
4096 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4097 Args);
4098 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4099 : CI->getArgOperand(0);
4100 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4101 } else if (IsX86 && (Name.starts_with("avx512.mask.vpermi2var.") ||
4102 Name.starts_with("avx512.mask.vpermt2var.") ||
4103 Name.starts_with("avx512.maskz.vpermt2var."))) {
4104 bool ZeroMask = Name[11] == 'z';
4105 bool IndexForm = Name[17] == 'i';
4106 Rep = upgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
4107 } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpbusd.") ||
4108 Name.starts_with("avx512.maskz.vpdpbusd.") ||
4109 Name.starts_with("avx512.mask.vpdpbusds.") ||
4110 Name.starts_with("avx512.maskz.vpdpbusds."))) {
4111 bool ZeroMask = Name[11] == 'z';
4112 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
4113 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4114 Intrinsic::ID IID;
4115 if (VecWidth == 128 && !IsSaturating)
4116 IID = Intrinsic::x86_avx512_vpdpbusd_128;
4117 else if (VecWidth == 256 && !IsSaturating)
4118 IID = Intrinsic::x86_avx512_vpdpbusd_256;
4119 else if (VecWidth == 512 && !IsSaturating)
4120 IID = Intrinsic::x86_avx512_vpdpbusd_512;
4121 else if (VecWidth == 128 && IsSaturating)
4122 IID = Intrinsic::x86_avx512_vpdpbusds_128;
4123 else if (VecWidth == 256 && IsSaturating)
4124 IID = Intrinsic::x86_avx512_vpdpbusds_256;
4125 else if (VecWidth == 512 && IsSaturating)
4126 IID = Intrinsic::x86_avx512_vpdpbusds_512;
4127 else
4128 llvm_unreachable("Unexpected intrinsic");
4129
4130 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4131 CI->getArgOperand(2) };
4132 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4133 Args);
4134 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4135 : CI->getArgOperand(0);
4136 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4137 } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpwssd.") ||
4138 Name.starts_with("avx512.maskz.vpdpwssd.") ||
4139 Name.starts_with("avx512.mask.vpdpwssds.") ||
4140 Name.starts_with("avx512.maskz.vpdpwssds."))) {
4141 bool ZeroMask = Name[11] == 'z';
4142 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
4143 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4144 Intrinsic::ID IID;
4145 if (VecWidth == 128 && !IsSaturating)
4146 IID = Intrinsic::x86_avx512_vpdpwssd_128;
4147 else if (VecWidth == 256 && !IsSaturating)
4148 IID = Intrinsic::x86_avx512_vpdpwssd_256;
4149 else if (VecWidth == 512 && !IsSaturating)
4150 IID = Intrinsic::x86_avx512_vpdpwssd_512;
4151 else if (VecWidth == 128 && IsSaturating)
4152 IID = Intrinsic::x86_avx512_vpdpwssds_128;
4153 else if (VecWidth == 256 && IsSaturating)
4154 IID = Intrinsic::x86_avx512_vpdpwssds_256;
4155 else if (VecWidth == 512 && IsSaturating)
4156 IID = Intrinsic::x86_avx512_vpdpwssds_512;
4157 else
4158 llvm_unreachable("Unexpected intrinsic");
4159
4160 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4161 CI->getArgOperand(2) };
4162 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4163 Args);
4164 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4165 : CI->getArgOperand(0);
4166 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4167 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
4168 Name == "addcarry.u32" || Name == "addcarry.u64" ||
4169 Name == "subborrow.u32" || Name == "subborrow.u64")) {
4170 Intrinsic::ID IID;
4171 if (Name[0] == 'a' && Name.back() == '2')
4172 IID = Intrinsic::x86_addcarry_32;
4173 else if (Name[0] == 'a' && Name.back() == '4')
4174 IID = Intrinsic::x86_addcarry_64;
4175 else if (Name[0] == 's' && Name.back() == '2')
4176 IID = Intrinsic::x86_subborrow_32;
4177 else if (Name[0] == 's' && Name.back() == '4')
4178 IID = Intrinsic::x86_subborrow_64;
4179 else
4180 llvm_unreachable("Unexpected intrinsic");
4181
4182 // Make a call with 3 operands.
4183 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4184 CI->getArgOperand(2)};
4185 Value *NewCall = Builder.CreateCall(
4186 Intrinsic::getDeclaration(CI->getModule(), IID),
4187 Args);
4188
4189 // Extract the second result and store it.
4190 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4191 // Cast the pointer to the right type.
4192 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
4193 llvm::PointerType::getUnqual(Data->getType()));
4194 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4195 // Replace the original call result with the first result of the new call.
4196 Value *CF = Builder.CreateExtractValue(NewCall, 0);
4197
4198 CI->replaceAllUsesWith(CF);
4199 Rep = nullptr;
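// For example, "addcarry.u32" becomes a call to llvm.x86.addcarry.32
// returning { i8, i32 }: the i32 sum is stored through the pointer
// operand and the i8 carry flag replaces the original result.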
4200 } else if (IsX86 && Name.starts_with("avx512.mask.") &&
4201 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
4202 // Rep will be updated by the call in the condition.
4203 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
4204 Value *Arg = CI->getArgOperand(0);
4205 Value *Neg = Builder.CreateNeg(Arg, "neg");
4206 Value *Cmp = Builder.CreateICmpSGE(
4207 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
4208 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
4209 } else if (IsNVVM && (Name.starts_with("atomic.load.add.f32.p") ||
4210 Name.starts_with("atomic.load.add.f64.p"))) {
4211 Value *Ptr = CI->getArgOperand(0);
4212 Value *Val = CI->getArgOperand(1);
4213 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
4214 AtomicOrdering::SequentiallyConsistent);
4215 } else if (IsNVVM && Name.consume_front("max.") &&
4216 (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
4217 Name == "ui" || Name == "ull")) {
4218 Value *Arg0 = CI->getArgOperand(0);
4219 Value *Arg1 = CI->getArgOperand(1);
4220 Value *Cmp = Name.starts_with("u")
4221 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
4222 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
4223 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
4224 } else if (IsNVVM && Name.consume_front("min.") &&
4225 (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
4226 Name == "ui" || Name == "ull")) {
4227 Value *Arg0 = CI->getArgOperand(0);
4228 Value *Arg1 = CI->getArgOperand(1);
4229 Value *Cmp = Name.starts_with("u")
4230 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
4231 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
4232 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
4233 } else if (IsNVVM && Name == "clz.ll") {
4234 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
4235 Value *Arg = CI->getArgOperand(0);
4236 Value *Ctlz = Builder.CreateCall(
4237 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
4238 {Arg->getType()}),
4239 {Arg, Builder.getFalse()}, "ctlz");
4240 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
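// That is, the replacement sequence is roughly:
//   %ctlz = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
//   %r = trunc i64 %ctlz to i32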
4241 } else if (IsNVVM && Name == "popc.ll") {
4242 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
4243 // i64.
4244 Value *Arg = CI->getArgOperand(0);
4245 Value *Popc = Builder.CreateCall(
4246 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
4247 {Arg->getType()}),
4248 Arg, "ctpop");
4249 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
4250 } else if (IsNVVM) {
4251 if (Name == "h2f") {
4252 Rep =
4253 Builder.CreateCall(Intrinsic::getDeclaration(
4254 F->getParent(), Intrinsic::convert_from_fp16,
4255 {Builder.getFloatTy()}),
4256 CI->getArgOperand(0), "h2f");
4257 } else {
4258 Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
4259 if (IID != Intrinsic::not_intrinsic &&
4260 !F->getReturnType()->getScalarType()->isBFloatTy()) {
4261 rename(F);
4262 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
4263 SmallVector<Value *, 2> Args;
4264 for (size_t I = 0; I < NewFn->arg_size(); ++I) {
4265 Value *Arg = CI->getArgOperand(I);
4266 Type *OldType = Arg->getType();
4267 Type *NewType = NewFn->getArg(I)->getType();
4268 Args.push_back((OldType->isIntegerTy() &&
4269 NewType->getScalarType()->isBFloatTy())
4270 ? Builder.CreateBitCast(Arg, NewType)
4271 : Arg);
4272 }
4273 Rep = Builder.CreateCall(NewFn, Args);
4274 if (F->getReturnType()->isIntegerTy())
4275 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
4276 }
4277 }
4278 } else if (IsARM) {
4279 Rep = upgradeARMIntrinsicCall(Name, CI, F, Builder);
4280 } else if (IsAMDGCN) {
4281 Rep = upgradeAMDGCNIntrinsicCall(Name, CI, F, Builder);
4282 } else if (IsDbg) {
4283 // We might have decided we don't want the new format after all between
4284 // first requesting the upgrade and now; skip the conversion if that is
4285 // the case, and check here to see if the intrinsic needs to be upgraded
4286 // normally.
4287 if (!CI->getModule()->IsNewDbgInfoFormat) {
4288 bool NeedsUpgrade =
4289 upgradeIntrinsicFunction1(CI->getCalledFunction(), NewFn, false);
4290 if (!NeedsUpgrade)
4291 return;
4292 FallthroughToDefaultUpgrade = true;
4293 } else {
4294 upgradeDbgIntrinsicToDbgRecord(Name, CI);
4295 }
4296 } else {
4297 llvm_unreachable("Unknown function for CallBase upgrade.");
4298 }
4299
4300 if (!FallthroughToDefaultUpgrade) {
4301 if (Rep)
4302 CI->replaceAllUsesWith(Rep);
4303 CI->eraseFromParent();
4304 return;
4305 }
4306 }
4307
4308 const auto &DefaultCase = [&]() -> void {
4309 if (CI->getFunctionType() == NewFn->getFunctionType()) {
4310 // Handle generic mangling change.
4311 assert(
4312 (CI->getCalledFunction()->getName() != NewFn->getName()) &&
4313 "Unknown function for CallBase upgrade and isn't just a name change");
4314 CI->setCalledFunction(NewFn);
4315 return;
4316 }
4317
4318 // This must be an upgrade from a named to a literal struct.
4319 if (auto *OldST = dyn_cast<StructType>(CI->getType())) {
4320 assert(OldST != NewFn->getReturnType() &&
4321 "Return type must have changed");
4322 assert(OldST->getNumElements() ==
4323 cast<StructType>(NewFn->getReturnType())->getNumElements() &&
4324 "Must have same number of elements");
4325
4326 SmallVector<Value *> Args(CI->args());
4327 Value *NewCI = Builder.CreateCall(NewFn, Args);
4328 Value *Res = PoisonValue::get(OldST);
4329 for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
4330 Value *Elem = Builder.CreateExtractValue(NewCI, Idx);
4331 Res = Builder.CreateInsertValue(Res, Elem, Idx);
4332 }
4333 CI->replaceAllUsesWith(Res);
4334 CI->eraseFromParent();
4335 return;
4336 }
4337
4338 // We're probably about to produce something invalid. Let the verifier catch
4339 // it instead of dying here.
4340 CI->setCalledOperand(
4341 ConstantExpr::getPointerCast(NewFn, CI->getCalledOperand()->getType()));
4342 return;
4343 };
4344 CallInst *NewCall = nullptr;
4345 switch (NewFn->getIntrinsicID()) {
4346 default: {
4347 DefaultCase();
4348 return;
4349 }
4350 case Intrinsic::arm_neon_vst1:
4351 case Intrinsic::arm_neon_vst2:
4352 case Intrinsic::arm_neon_vst3:
4353 case Intrinsic::arm_neon_vst4:
4354 case Intrinsic::arm_neon_vst2lane:
4355 case Intrinsic::arm_neon_vst3lane:
4356 case Intrinsic::arm_neon_vst4lane: {
4357 SmallVector<Value *, 4> Args(CI->args());
4358 NewCall = Builder.CreateCall(NewFn, Args);
4359 break;
4360 }
4361 case Intrinsic::aarch64_sve_bfmlalb_lane_v2:
4362 case Intrinsic::aarch64_sve_bfmlalt_lane_v2:
4363 case Intrinsic::aarch64_sve_bfdot_lane_v2: {
4364 LLVMContext &Ctx = F->getParent()->getContext();
4365 SmallVector<Value *, 4> Args(CI->args());
4366 Args[3] = ConstantInt::get(Type::getInt32Ty(Ctx),
4367 cast<ConstantInt>(Args[3])->getZExtValue());
4368 NewCall = Builder.CreateCall(NewFn, Args);
4369 break;
4370 }
4371 case Intrinsic::aarch64_sve_ld3_sret:
4372 case Intrinsic::aarch64_sve_ld4_sret:
4373 case Intrinsic::aarch64_sve_ld2_sret: {
4374 StringRef Name = F->getName();
4375 Name = Name.substr(5);
4376 unsigned N = StringSwitch<unsigned>(Name)
4377 .StartsWith("aarch64.sve.ld2", 2)
4378 .StartsWith("aarch64.sve.ld3", 3)
4379 .StartsWith("aarch64.sve.ld4", 4)
4380 .Default(0);
4381 ScalableVectorType *RetTy =
4382 dyn_cast<ScalableVectorType>(F->getReturnType());
4383 unsigned MinElts = RetTy->getMinNumElements() / N;
4384 SmallVector<Value *, 2> Args(CI->args());
4385 Value *NewLdCall = Builder.CreateCall(NewFn, Args);
4386 Value *Ret = llvm::PoisonValue::get(RetTy);
4387 for (unsigned I = 0; I < N; I++) {
4388 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4389 Value *SRet = Builder.CreateExtractValue(NewLdCall, I);
4390 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
4391 }
4392 NewCall = dyn_cast<CallInst>(Ret);
4393 break;
4394 }
4395
4396 case Intrinsic::coro_end: {
4397 SmallVector<Value *, 3> Args(CI->args());
4398 Args.push_back(ConstantTokenNone::get(CI->getContext()));
4399 NewCall = Builder.CreateCall(NewFn, Args);
4400 break;
4401 }
4402
4403 case Intrinsic::vector_extract: {
4404 StringRef Name = F->getName();
4405 Name = Name.substr(5); // Strip llvm
4406 if (!Name.starts_with("aarch64.sve.tuple.get")) {
4407 DefaultCase();
4408 return;
4409 }
4410 ScalableVectorType *RetTy =
4411 dyn_cast<ScalableVectorType>(F->getReturnType());
4412 unsigned MinElts = RetTy->getMinNumElements();
4413 unsigned I = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
4414 Value *NewIdx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4415 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0), NewIdx});
4416 break;
4417 }
4418
4419 case Intrinsic::vector_insert: {
4420 StringRef Name = F->getName();
4421 Name = Name.substr(5);
4422 if (!Name.starts_with("aarch64.sve.tuple")) {
4423 DefaultCase();
4424 return;
4425 }
4426 if (Name.starts_with("aarch64.sve.tuple.set")) {
4427 unsigned I = dyn_cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
4428 ScalableVectorType *Ty =
4429 dyn_cast<ScalableVectorType>(CI->getArgOperand(2)->getType());
4430 Value *NewIdx =
4431 ConstantInt::get(Type::getInt64Ty(C), I * Ty->getMinNumElements());
4432 NewCall = Builder.CreateCall(
4433 NewFn, {CI->getArgOperand(0), CI->getArgOperand(2), NewIdx});
4434 break;
4435 }
4436 if (Name.starts_with("aarch64.sve.tuple.create")) {
4437 unsigned N = StringSwitch<unsigned>(Name)
4438 .StartsWith("aarch64.sve.tuple.create2", 2)
4439 .StartsWith("aarch64.sve.tuple.create3", 3)
4440 .StartsWith("aarch64.sve.tuple.create4", 4)
4441 .Default(0);
4442 assert(N > 1 && "Create is expected to be between 2-4");
4443 ScalableVectorType *RetTy =
4444 dyn_cast<ScalableVectorType>(F->getReturnType());
4445 Value *Ret = llvm::PoisonValue::get(RetTy);
4446 unsigned MinElts = RetTy->getMinNumElements() / N;
4447 for (unsigned I = 0; I < N; I++) {
4448 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4449 Value *V = CI->getArgOperand(I);
4450 Ret = Builder.CreateInsertVector(RetTy, Ret, V, Idx);
4451 }
4452 NewCall = dyn_cast<CallInst>(Ret);
4453 }
4454 break;
4455 }
4456
4457 case Intrinsic::arm_neon_bfdot:
4458 case Intrinsic::arm_neon_bfmmla:
4459 case Intrinsic::arm_neon_bfmlalb:
4460 case Intrinsic::arm_neon_bfmlalt:
4461 case Intrinsic::aarch64_neon_bfdot:
4462 case Intrinsic::aarch64_neon_bfmmla:
4463 case Intrinsic::aarch64_neon_bfmlalb:
4464 case Intrinsic::aarch64_neon_bfmlalt: {
4465 SmallVector<Value *, 3> Args;
4466 assert(CI->arg_size() == 3 &&
4467 "Mismatch between function args and call args");
4468 size_t OperandWidth =
4469 CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
4470 assert((OperandWidth == 64 || OperandWidth == 128) &&
4471 "Unexpected operand width");
4472 Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
4473 auto Iter = CI->args().begin();
4474 Args.push_back(*Iter++);
4475 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4476 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4477 NewCall = Builder.CreateCall(NewFn, Args);
4478 break;
4479 }
4480
4481 case Intrinsic::bitreverse:
4482 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4483 break;
4484
4485 case Intrinsic::ctlz:
4486 case Intrinsic::cttz:
4487 assert(CI->arg_size() == 1 &&
4488 "Mismatch between function args and call args");
4489 NewCall =
4490 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
4491 break;
4492
4493 case Intrinsic::objectsize: {
4494 Value *NullIsUnknownSize =
4495 CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
4496 Value *Dynamic =
4497 CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
4498 NewCall = Builder.CreateCall(
4499 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
4500 break;
4501 }
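// Editorial note (not in the original source): as an illustrative sketch,
// an old two-argument call such as
//   %sz = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false)
// is padded to the current four-argument form, defaulting both the
// null-is-unknown-size flag and the dynamic flag to false:
//   %sz = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 false, i1 false)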
4502
4503 case Intrinsic::ctpop:
4504 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4505 break;
4506
4507 case Intrinsic::convert_from_fp16:
4508 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4509 break;
4510
4511 case Intrinsic::dbg_value: {
4512 StringRef Name = F->getName();
4513 Name = Name.substr(5); // Strip llvm.
4514 // Upgrade `dbg.addr` to `dbg.value` with `DW_OP_deref`.
4515 if (Name.starts_with("dbg.addr")) {
4516 DIExpression *Expr = cast<DIExpression>(
4517 cast<MetadataAsValue>(CI->getArgOperand(2))->getMetadata());
4518 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
4519 NewCall =
4520 Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
4521 MetadataAsValue::get(C, Expr)});
4522 break;
4523 }
4524
4525 // Upgrade from the old version that had an extra offset argument.
4526 assert(CI->arg_size() == 4);
4527 // Drop nonzero offsets instead of attempting to upgrade them.
4528 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
4529 if (Offset->isZeroValue()) {
4530 NewCall = Builder.CreateCall(
4531 NewFn,
4532 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
4533 break;
4534 }
4535 CI->eraseFromParent();
4536 return;
4537 }
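// Editorial note (not in the original source): illustratively, a legacy
//   call void @llvm.dbg.addr(metadata ptr %p, metadata !10, metadata !DIExpression())
// becomes a dbg.value whose expression gains a trailing deref:
//   call void @llvm.dbg.value(metadata ptr %p, metadata !10, metadata !DIExpression(DW_OP_deref))
// where !10 stands in for some local variable's metadata.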
4538
4539 case Intrinsic::ptr_annotation:
4540 // Upgrade from versions that lacked the annotation attribute argument.
4541 if (CI->arg_size() != 4) {
4542 DefaultCase();
4543 return;
4544 }
4545
4546 // Create a new call with an added null annotation attribute argument.
4547 NewCall =
4548 Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
4549 CI->getArgOperand(2), CI->getArgOperand(3),
4550 Constant::getNullValue(Builder.getPtrTy())});
4551 NewCall->takeName(CI);
4552 CI->replaceAllUsesWith(NewCall);
4553 CI->eraseFromParent();
4554 return;
4555
4556 case Intrinsic::var_annotation:
4557 // Upgrade from versions that lacked the annotation attribute argument.
4558 if (CI->arg_size() != 4) {
4559 DefaultCase();
4560 return;
4561 }
4562 // Create a new call with an added null annotation attribute argument.
4563 NewCall =
4564 Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
4565 CI->getArgOperand(2), CI->getArgOperand(3),
4566 Constant::getNullValue(Builder.getPtrTy())});
4567 NewCall->takeName(CI);
4568 CI->replaceAllUsesWith(NewCall);
4569 CI->eraseFromParent();
4570 return;
4571
4572 case Intrinsic::riscv_aes32dsi:
4573 case Intrinsic::riscv_aes32dsmi:
4574 case Intrinsic::riscv_aes32esi:
4575 case Intrinsic::riscv_aes32esmi:
4576 case Intrinsic::riscv_sm4ks:
4577 case Intrinsic::riscv_sm4ed: {
4578 // The last argument to these intrinsics used to be i8 and changed to i32.
4579 // The type overload for sm4ks and sm4ed was removed.
4580 Value *Arg2 = CI->getArgOperand(2);
4581 if (Arg2->getType()->isIntegerTy(32) && !CI->getType()->isIntegerTy(64))
4582 return;
4583
4584 Value *Arg0 = CI->getArgOperand(0);
4585 Value *Arg1 = CI->getArgOperand(1);
4586 if (CI->getType()->isIntegerTy(64)) {
4587 Arg0 = Builder.CreateTrunc(Arg0, Builder.getInt32Ty());
4588 Arg1 = Builder.CreateTrunc(Arg1, Builder.getInt32Ty());
4589 }
4590
4591 Arg2 = ConstantInt::get(Type::getInt32Ty(C),
4592 cast<ConstantInt>(Arg2)->getZExtValue());
4593
4594 NewCall = Builder.CreateCall(NewFn, {Arg0, Arg1, Arg2});
4595 Value *Res = NewCall;
4596 if (Res->getType() != CI->getType())
4597 Res = Builder.CreateIntCast(NewCall, CI->getType(), /*isSigned*/ true);
4598 NewCall->takeName(CI);
4599 CI->replaceAllUsesWith(Res);
4600 CI->eraseFromParent();
4601 return;
4602 }
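// Editorial note (not in the original source): on RV64, a legacy call like
//   %r = call i64 @llvm.riscv.aes32esi(i64 %a, i64 %b, i8 3)
// is narrowed to the i32-only declaration and sign-extended back:
//   %a32 = trunc i64 %a to i32
//   %b32 = trunc i64 %b to i32
//   %r32 = call i32 @llvm.riscv.aes32esi(i32 %a32, i32 %b32, i32 3)
//   %r = sext i32 %r32 to i64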
4603 case Intrinsic::riscv_sha256sig0:
4604 case Intrinsic::riscv_sha256sig1:
4605 case Intrinsic::riscv_sha256sum0:
4606 case Intrinsic::riscv_sha256sum1:
4607 case Intrinsic::riscv_sm3p0:
4608 case Intrinsic::riscv_sm3p1: {
4609 // The type overload for these intrinsics was removed; only the i32 form
4610 // remains, so 64-bit calls are narrowed and their result sign-extended.
4611 if (!CI->getType()->isIntegerTy(64))
4612 return;
4613
4614 Value *Arg =
4615 Builder.CreateTrunc(CI->getArgOperand(0), Builder.getInt32Ty());
4616
4617 NewCall = Builder.CreateCall(NewFn, Arg);
4618 Value *Res =
4619 Builder.CreateIntCast(NewCall, CI->getType(), /*isSigned*/ true);
4620 NewCall->takeName(CI);
4621 CI->replaceAllUsesWith(Res);
4622 CI->eraseFromParent();
4623 return;
4624 }
4625
4626 case Intrinsic::x86_xop_vfrcz_ss:
4627 case Intrinsic::x86_xop_vfrcz_sd:
4628 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
4629 break;
4630
4631 case Intrinsic::x86_xop_vpermil2pd:
4632 case Intrinsic::x86_xop_vpermil2ps:
4633 case Intrinsic::x86_xop_vpermil2pd_256:
4634 case Intrinsic::x86_xop_vpermil2ps_256: {
4635 SmallVector<Value *, 4> Args(CI->args());
4636 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
4637 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
4638 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
4639 NewCall = Builder.CreateCall(NewFn, Args);
4640 break;
4641 }
4642
4643 case Intrinsic::x86_sse41_ptestc:
4644 case Intrinsic::x86_sse41_ptestz:
4645 case Intrinsic::x86_sse41_ptestnzc: {
4646 // The arguments for these intrinsics used to be v4f32, and changed
4647 // to v2i64. This is purely a nop, since those are bitwise intrinsics.
4648 // So, the only thing required is a bitcast for both arguments.
4649 // First, check the arguments have the old type.
4650 Value *Arg0 = CI->getArgOperand(0);
4651 if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
4652 return;
4653
4654 // Old intrinsic, add bitcasts
4655 Value *Arg1 = CI->getArgOperand(1);
4656
4657 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
4658
4659 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
4660 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
4661
4662 NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
4663 break;
4664 }
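// Editorial note (not in the original source): the legacy v4f32 form
//   %r = call i32 @llvm.x86.sse41.ptestc(<4 x float> %a, <4 x float> %b)
// is rewritten with two bit-preserving casts feeding the v2i64 declaration:
//   %c0 = bitcast <4 x float> %a to <2 x i64>
//   %c1 = bitcast <4 x float> %b to <2 x i64>
//   %r = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %c0, <2 x i64> %c1)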
4665
4666 case Intrinsic::x86_rdtscp: {
4667 // This used to take one argument. If we have no arguments, it is already
4668 // upgraded.
4669 if (CI->getNumOperands() == 0)
4670 return;
4671
4672 NewCall = Builder.CreateCall(NewFn);
4673 // Extract the second result and store it.
4674 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4675 // Cast the pointer to the right type.
4676 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
4677 llvm::PointerType::getUnqual(Data->getType()));
4678 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4679 // Replace the original call result with the first result of the new call.
4680 Value *TSC = Builder.CreateExtractValue(NewCall, 0);
4681
4682 NewCall->takeName(CI);
4683 CI->replaceAllUsesWith(TSC);
4684 CI->eraseFromParent();
4685 return;
4686 }
4687
4688 case Intrinsic::x86_sse41_insertps:
4689 case Intrinsic::x86_sse41_dppd:
4690 case Intrinsic::x86_sse41_dpps:
4691 case Intrinsic::x86_sse41_mpsadbw:
4692 case Intrinsic::x86_avx_dp_ps_256:
4693 case Intrinsic::x86_avx2_mpsadbw: {
4694 // Need to truncate the last argument from i32 to i8 -- this argument models
4695 // an inherently 8-bit immediate operand to these x86 instructions.
4696 SmallVector<Value *, 4> Args(CI->args());
4697
4698 // Replace the last argument with a trunc.
4699 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
4700 NewCall = Builder.CreateCall(NewFn, Args);
4701 break;
4702 }
4703
4704 case Intrinsic::x86_avx512_mask_cmp_pd_128:
4705 case Intrinsic::x86_avx512_mask_cmp_pd_256:
4706 case Intrinsic::x86_avx512_mask_cmp_pd_512:
4707 case Intrinsic::x86_avx512_mask_cmp_ps_128:
4708 case Intrinsic::x86_avx512_mask_cmp_ps_256:
4709 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
4710 SmallVector<Value *, 4> Args(CI->args());
4711 unsigned NumElts =
4712 cast<FixedVectorType>(Args[0]->getType())->getNumElements();
4713 Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
4714
4715 NewCall = Builder.CreateCall(NewFn, Args);
4716 Value *Res = applyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
4717
4718 NewCall->takeName(CI);
4719 CI->replaceAllUsesWith(Res);
4720 CI->eraseFromParent();
4721 return;
4722 }
4723
4724 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128:
4725 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256:
4726 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512:
4727 case Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128:
4728 case Intrinsic::x86_avx512bf16_cvtneps2bf16_256:
4729 case Intrinsic::x86_avx512bf16_cvtneps2bf16_512: {
4730 SmallVector<Value *, 4> Args(CI->args());
4731 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4732 if (NewFn->getIntrinsicID() ==
4733 Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
4734 Args[1] = Builder.CreateBitCast(
4735 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4736
4737 NewCall = Builder.CreateCall(NewFn, Args);
4738 Value *Res = Builder.CreateBitCast(
4739 NewCall, FixedVectorType::get(Builder.getInt16Ty(), NumElts));
4740
4741 NewCall->takeName(CI);
4742 CI->replaceAllUsesWith(Res);
4743 CI->eraseFromParent();
4744 return;
4745 }
4746 case Intrinsic::x86_avx512bf16_dpbf16ps_128:
4747 case Intrinsic::x86_avx512bf16_dpbf16ps_256:
4748 case Intrinsic::x86_avx512bf16_dpbf16ps_512:{
4749 SmallVector<Value *, 4> Args(CI->args());
4750 unsigned NumElts =
4751 cast<FixedVectorType>(CI->getType())->getNumElements() * 2;
4752 Args[1] = Builder.CreateBitCast(
4753 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4754 Args[2] = Builder.CreateBitCast(
4755 Args[2], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4756
4757 NewCall = Builder.CreateCall(NewFn, Args);
4758 break;
4759 }
4760
4761 case Intrinsic::thread_pointer: {
4762 NewCall = Builder.CreateCall(NewFn, {});
4763 break;
4764 }
4765
4766 case Intrinsic::memcpy:
4767 case Intrinsic::memmove:
4768 case Intrinsic::memset: {
4769 // We have to make sure that the call signature is what we're expecting.
4770 // We only want to change the old signatures by removing the alignment arg:
4771 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
4772 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
4773 // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
4774 // -> @llvm.memset...(i8*, i8, i[32|64], i1)
4775 // Note: i8*'s in the above can be any pointer type
4776 if (CI->arg_size() != 5) {
4777 DefaultCase();
4778 return;
4779 }
4780 // Remove alignment argument (3), and add alignment attributes to the
4781 // dest/src pointers.
4782 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
4783 CI->getArgOperand(2), CI->getArgOperand(4)};
4784 NewCall = Builder.CreateCall(NewFn, Args);
4785 AttributeList OldAttrs = CI->getAttributes();
4786 AttributeList NewAttrs = AttributeList::get(
4787 C, OldAttrs.getFnAttrs(), OldAttrs.getRetAttrs(),
4788 {OldAttrs.getParamAttrs(0), OldAttrs.getParamAttrs(1),
4789 OldAttrs.getParamAttrs(2), OldAttrs.getParamAttrs(4)});
4790 NewCall->setAttributes(NewAttrs);
4791 auto *MemCI = cast<MemIntrinsic>(NewCall);
4792 // All mem intrinsics support dest alignment.
4793 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
4794 MemCI->setDestAlignment(Align->getMaybeAlignValue());
4795 // Memcpy/Memmove also support source alignment.
4796 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
4797 MTI->setSourceAlignment(Align->getMaybeAlignValue());
4798 break;
4799 }
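// Editorial note (not in the original source): the dropped alignment operand
// reappears as align attributes on the pointer arguments, e.g.
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 16, i32 4, i1 false)
// is upgraded along the lines of
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 16, i1 false)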
4800 }
4801 assert(NewCall && "Should have either set this variable or returned through "
4802 "the default case");
4803 NewCall->takeName(CI);
4804 CI->replaceAllUsesWith(NewCall);
4805 CI->eraseFromParent();
4806}
4807
4808void llvm::UpgradeCallsToIntrinsic(Function *F) {
4809 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
4810
4811 // Check if this function should be upgraded and get the replacement function
4812 // if there is one.
4813 Function *NewFn;
4814 if (UpgradeIntrinsicFunction(F, NewFn)) {
4815 // Replace all users of the old function with the new function or new
4816 // instructions. This is not a range loop because the call is deleted.
4817 for (User *U : make_early_inc_range(F->users()))
4818 if (CallBase *CB = dyn_cast<CallBase>(U))
4819 UpgradeIntrinsicCall(CB, NewFn);
4820
4821 // Remove old function, no longer used, from the module.
4822 F->eraseFromParent();
4823 }
4824}
4825
4826MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
4827 const unsigned NumOperands = MD.getNumOperands();
4828 if (NumOperands == 0)
4829 return &MD; // Invalid, punt to a verifier error.
4830
4831 // Check if the tag uses struct-path aware TBAA format.
4832 if (isa<MDNode>(MD.getOperand(0)) && NumOperands >= 3)
4833 return &MD;
4834
4835 auto &Context = MD.getContext();
4836 if (NumOperands == 3) {
4837 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
4838 MDNode *ScalarType = MDNode::get(Context, Elts);
4839 // Create a MDNode <ScalarType, ScalarType, offset 0, const>
4840 Metadata *Elts2[] = {ScalarType, ScalarType,
4841 ConstantAsMetadata::get(
4842 Constant::getNullValue(Type::getInt64Ty(Context))),
4843 MD.getOperand(2)};
4844 return MDNode::get(Context, Elts2);
4845 }
4846 // Create a MDNode <MD, MD, offset 0>
4847 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
4848 Type::getInt64Ty(Context)))};
4849 return MDNode::get(Context, Elts);
4850}
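// Editorial note (not in the original source): illustratively, a two-operand
// scalar tag !0 = !{!"int", !1} becomes the struct-path tag !{!0, !0, i64 0},
// while a three-operand tag !{!"int", !1, i64 1} becomes !{!2, !2, i64 0, i64 1},
// where !2 = !{!"int", !1} and the trailing i64 1 preserves the constness flag.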
4851
4852Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
4853 Instruction *&Temp) {
4854 if (Opc != Instruction::BitCast)
4855 return nullptr;
4856
4857 Temp = nullptr;
4858 Type *SrcTy = V->getType();
4859 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4860 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4861 LLVMContext &Context = V->getContext();
4862
4863 // We have no information about target data layout, so we assume that
4864 // the maximum pointer size is 64bit.
4865 Type *MidTy = Type::getInt64Ty(Context);
4866 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
4867
4868 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
4869 }
4870
4871 return nullptr;
4872}
4873
4874Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
4875 if (Opc != Instruction::BitCast)
4876 return nullptr;
4877
4878 Type *SrcTy = C->getType();
4879 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4880 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4881 LLVMContext &Context = C->getContext();
4882
4883 // We have no information about target data layout, so we assume that
4884 // the maximum pointer size is 64bit.
4885 Type *MidTy = Type::getInt64Ty(Context);
4886
4888 DestTy);
4889 }
4890
4891 return nullptr;
4892}
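// Editorial note (not in the original source): in both helpers above, a
// bitcast between pointers in different address spaces, which is not valid
// IR, is routed through an integer of the assumed-maximal 64-bit width:
//   %t = ptrtoint ptr addrspace(1) %v to i64
//   %r = inttoptr i64 %t to ptr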
4893
4894/// Check the debug info version number; if it is outdated, drop the debug
4895/// info. Return true if the module is modified.
4896bool llvm::UpgradeDebugInfo(Module &M) {
4897 if (DisableAutoUpgradeDebugInfo)
4898 return false;
4899
4900 unsigned Version = getDebugMetadataVersionFromModule(M);
4901 if (Version == DEBUG_METADATA_VERSION) {
4902 bool BrokenDebugInfo = false;
4903 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
4904 report_fatal_error("Broken module found, compilation aborted!");
4905 if (!BrokenDebugInfo)
4906 // Everything is ok.
4907 return false;
4908 else {
4909 // Diagnose malformed debug info.
4910 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
4911 M.getContext().diagnose(Diag);
4912 }
4913 }
4914 bool Modified = StripDebugInfo(M);
4915 if (Modified && Version != DEBUG_METADATA_VERSION) {
4916 // Diagnose a version mismatch.
4917 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
4918 M.getContext().diagnose(DiagVersion);
4919 }
4920 return Modified;
4921}
4922
4923/// This checks for the objc retain/release marker which should be upgraded.
4924/// It returns true if the module is modified.
4925static bool upgradeRetainReleaseMarker(Module &M) {
4926 bool Changed = false;
4927 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
4928 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
4929 if (ModRetainReleaseMarker) {
4930 MDNode *Op = ModRetainReleaseMarker->getOperand(0);
4931 if (Op) {
4932 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
4933 if (ID) {
4934 SmallVector<StringRef, 4> ValueComp;
4935 ID->getString().split(ValueComp, "#");
4936 if (ValueComp.size() == 2) {
4937 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
4938 ID = MDString::get(M.getContext(), NewValue);
4939 }
4940 M.addModuleFlag(Module::Error, MarkerKey, ID);
4941 M.eraseNamedMetadata(ModRetainReleaseMarker);
4942 Changed = true;
4943 }
4944 }
4945 }
4946 return Changed;
4947}
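// Editorial note (not in the original source): a marker string of the legacy
// shape "A#B" in the named metadata is re-emitted as the module flag value
// "A;B"; any other shape is carried over to the module flag unchanged.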
4948
4949void llvm::UpgradeARCRuntime(Module &M) {
4950 // This lambda converts normal function calls to ARC runtime functions to
4951 // intrinsic calls.
4952 auto UpgradeToIntrinsic = [&](const char *OldFunc,
4953 llvm::Intrinsic::ID IntrinsicFunc) {
4954 Function *Fn = M.getFunction(OldFunc);
4955
4956 if (!Fn)
4957 return;
4958
4959 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
4960
4961 for (User *U : make_early_inc_range(Fn->users())) {
4962 CallInst *CI = dyn_cast<CallInst>(U);
4963 if (!CI || CI->getCalledFunction() != Fn)
4964 continue;
4965
4966 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
4967 FunctionType *NewFuncTy = NewFn->getFunctionType();
4968 SmallVector<Value *, 2> Args;
4969
4970 // Don't upgrade the intrinsic if it's not valid to bitcast the return
4971 // value to the return type of the old function.
4972 if (NewFuncTy->getReturnType() != CI->getType() &&
4973 !CastInst::castIsValid(Instruction::BitCast, CI,
4974 NewFuncTy->getReturnType()))
4975 continue;
4976
4977 bool InvalidCast = false;
4978
4979 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
4980 Value *Arg = CI->getArgOperand(I);
4981
4982 // Bitcast argument to the parameter type of the new function if it's
4983 // not a variadic argument.
4984 if (I < NewFuncTy->getNumParams()) {
4985 // Don't upgrade the intrinsic if it's not valid to bitcast the argument
4986 // to the parameter type of the new function.
4987 if (!CastInst::castIsValid(Instruction::BitCast, Arg,
4988 NewFuncTy->getParamType(I))) {
4989 InvalidCast = true;
4990 break;
4991 }
4992 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
4993 }
4994 Args.push_back(Arg);
4995 }
4996
4997 if (InvalidCast)
4998 continue;
4999
5000 // Create a call instruction that calls the new function.
5001 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
5002 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
5003 NewCall->takeName(CI);
5004
5005 // Bitcast the return value back to the type of the old call.
5006 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
5007
5008 if (!CI->use_empty())
5009 CI->replaceAllUsesWith(NewRetVal);
5010 CI->eraseFromParent();
5011 }
5012
5013 if (Fn->use_empty())
5014 Fn->eraseFromParent();
5015 };
5016
5017 // Unconditionally convert a call to "clang.arc.use" to a call to
5018 // "llvm.objc.clang.arc.use".
5019 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
5020
5021 // Upgrade the retain/release marker. If there is no need to upgrade
5022 // the marker, the module is either already new enough to contain the new
5023 // intrinsics or it is not ARC, and there is no need to upgrade the runtime calls.
5024 if (!upgradeRetainReleaseMarker(M))
5025 return;
5026
5027 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
5028 {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
5029 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
5030 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
5031 {"objc_autoreleaseReturnValue",
5032 llvm::Intrinsic::objc_autoreleaseReturnValue},
5033 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
5034 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
5035 {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
5036 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
5037 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
5038 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
5039 {"objc_release", llvm::Intrinsic::objc_release},
5040 {"objc_retain", llvm::Intrinsic::objc_retain},
5041 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
5042 {"objc_retainAutoreleaseReturnValue",
5043 llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
5044 {"objc_retainAutoreleasedReturnValue",
5045 llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
5046 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
5047 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
5048 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
5049 {"objc_unsafeClaimAutoreleasedReturnValue",
5050 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
5051 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
5052 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
5053 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
5054 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
5055 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
5056 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
5057 {"objc_arc_annotation_topdown_bbstart",
5058 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
5059 {"objc_arc_annotation_topdown_bbend",
5060 llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
5061 {"objc_arc_annotation_bottomup_bbstart",
5062 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
5063 {"objc_arc_annotation_bottomup_bbend",
5064 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
5065
5066 for (auto &I : RuntimeFuncs)
5067 UpgradeToIntrinsic(I.first, I.second);
5068}
5069
5070bool llvm::UpgradeModuleFlags(Module &M) {
5071 NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
5072 if (!ModFlags)
5073 return false;
5074
5075 bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
5076 bool HasSwiftVersionFlag = false;
5077 uint8_t SwiftMajorVersion, SwiftMinorVersion;
5078 uint32_t SwiftABIVersion;
5079 auto Int8Ty = Type::getInt8Ty(M.getContext());
5080 auto Int32Ty = Type::getInt32Ty(M.getContext());
5081
5082 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
5083 MDNode *Op = ModFlags->getOperand(I);
5084 if (Op->getNumOperands() != 3)
5085 continue;
5086 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
5087 if (!ID)
5088 continue;
5089 auto SetBehavior = [&](Module::ModFlagBehavior B) {
5090 Metadata *Ops[3] = {ConstantAsMetadata::get(ConstantInt::get(
5091 Type::getInt32Ty(M.getContext()), B)),
5092 MDString::get(M.getContext(), ID->getString()),
5093 Op->getOperand(2)};
5094 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5095 Changed = true;
5096 };
5097
5098 if (ID->getString() == "Objective-C Image Info Version")
5099 HasObjCFlag = true;
5100 if (ID->getString() == "Objective-C Class Properties")
5101 HasClassProperties = true;
5102 // Upgrade PIC from Error/Max to Min.
5103 if (ID->getString() == "PIC Level") {
5104 if (auto *Behavior =
5105 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
5106 uint64_t V = Behavior->getLimitedValue();
5107 if (V == Module::Error || V == Module::Max)
5108 SetBehavior(Module::Min);
5109 }
5110 }
5111 // Upgrade "PIE Level" from Error to Max.
5112 if (ID->getString() == "PIE Level")
5113 if (auto *Behavior =
5114 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)))
5115 if (Behavior->getLimitedValue() == Module::Error)
5116 SetBehavior(Module::Max);
5117
5118 // Upgrade branch protection and return address signing module flags. The
5119 // module flag behavior for these fields were Error and now they are Min.
5120 if (ID->getString() == "branch-target-enforcement" ||
5121 ID->getString().starts_with("sign-return-address")) {
5122 if (auto *Behavior =
5123 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
5124 if (Behavior->getLimitedValue() == Module::Error) {
5125 Type *Int32Ty = Type::getInt32Ty(M.getContext());
5126 Metadata *Ops[3] = {
5127 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Min)),
5128 Op->getOperand(1), Op->getOperand(2)};
5129 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5130 Changed = true;
5131 }
5132 }
5133 }
5134
5135 // Upgrade Objective-C Image Info Section. Remove the whitespace in the
5136 // section name so that llvm-lto will not complain about mismatching
5137 // module flags that are functionally the same.
5138 if (ID->getString() == "Objective-C Image Info Section") {
5139 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
5140 SmallVector<StringRef, 4> ValueComp;
5141 Value->getString().split(ValueComp, " ");
5142 if (ValueComp.size() != 1) {
5143 std::string NewValue;
5144 for (auto &S : ValueComp)
5145 NewValue += S.str();
5146 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
5147 MDString::get(M.getContext(), NewValue)};
5148 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5149 Changed = true;
5150 }
5151 }
5152 }
5153
5154 // IRUpgrader turns an i32-typed "Objective-C Garbage Collection" into an i8 value.
5155 // If the higher bits are set, it adds new module flags for the Swift info.
5156 if (ID->getString() == "Objective-C Garbage Collection") {
5157 auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
5158 if (Md) {
5159 assert(Md->getValue() && "Expected non-empty metadata");
5160 auto Type = Md->getValue()->getType();
5161 if (Type == Int8Ty)
5162 continue;
5163 unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
5164 if ((Val & 0xff) != Val) {
5165 HasSwiftVersionFlag = true;
5166 SwiftABIVersion = (Val & 0xff00) >> 8;
5167 SwiftMajorVersion = (Val & 0xff000000) >> 24;
5168 SwiftMinorVersion = (Val & 0xff0000) >> 16;
5169 }
5170 Metadata *Ops[3] = {
5171 Op->getOperand(0),
5172 Op->getOperand(1),
5173 ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
5174 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5175 Changed = true;
5176 }
5177 }
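// Editorial note (not in the original source): as a worked example with the
// hypothetical value Val = 0x05040600, the split above yields Swift major
// version 5 (bits 31:24), minor version 4 (bits 23:16), and ABI version 6
// (bits 15:8), while only the low GC byte 0x00 is kept in the rewritten flag.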
5178
5179 if (ID->getString() == "amdgpu_code_object_version") {
5180 Metadata *Ops[3] = {
5181 Op->getOperand(0),
5182 MDString::get(M.getContext(), "amdhsa_code_object_version"),
5183 Op->getOperand(2)};
5184 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
5185 Changed = true;
5186 }
5187 }
5188
5189 // "Objective-C Class Properties" is recently added for Objective-C. We
5190 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
5191 // flag of value 0, so we can correclty downgrade this flag when trying to
5192 // link an ObjC bitcode without this module flag with an ObjC bitcode with
5193 // this module flag.
5194 if (HasObjCFlag && !HasClassProperties) {
5195 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
5196 (uint32_t)0);
5197 Changed = true;
5198 }
5199
5200 if (HasSwiftVersionFlag) {
5201 M.addModuleFlag(Module::Error, "Swift ABI Version",
5202 SwiftABIVersion);
5203 M.addModuleFlag(Module::Error, "Swift Major Version",
5204 ConstantInt::get(Int8Ty, SwiftMajorVersion));
5205 M.addModuleFlag(Module::Error, "Swift Minor Version",
5206 ConstantInt::get(Int8Ty, SwiftMinorVersion));
5207 Changed = true;
5208 }
5209
5210 return Changed;
5211}
5212
5213void llvm::UpgradeSectionAttributes(Module &M) {
5214 auto TrimSpaces = [](StringRef Section) -> std::string {
5215 SmallVector<StringRef, 5> Components;
5216 Section.split(Components, ',');
5217
5218 SmallString<32> Buffer;
5219 raw_svector_ostream OS(Buffer);
5220
5221 for (auto Component : Components)
5222 OS << ',' << Component.trim();
5223
5224 return std::string(OS.str().substr(1));
5225 };
5226
5227 for (auto &GV : M.globals()) {
5228 if (!GV.hasSection())
5229 continue;
5230
5231 StringRef Section = GV.getSection();
5232
5233 if (!Section.starts_with("__DATA, __objc_catlist"))
5234 continue;
5235
5236 // __DATA, __objc_catlist, regular, no_dead_strip
5237 // __DATA,__objc_catlist,regular,no_dead_strip
5238 GV.setSection(TrimSpaces(Section));
5239 }
5240}
5241
5242namespace {
5243// Prior to LLVM 10.0, the strictfp attribute could be used on individual
5244// callsites within a function that did not also have the strictfp attribute.
5245// Since 10.0, if strict FP semantics are needed within a function, the
5246// function must have the strictfp attribute and all calls within the function
5247// must also have the strictfp attribute. This latter restriction is
5248// necessary to prevent unwanted libcall simplification when a function is
5249// being cloned (such as for inlining).
5250//
5251// The "dangling" strictfp attribute usage was only used to prevent constant
5252// folding and other libcall simplification. The nobuiltin attribute on the
5253// callsite has the same effect.
5254struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
5255 StrictFPUpgradeVisitor() = default;
5256
5257 void visitCallBase(CallBase &Call) {
5258 if (!Call.isStrictFP())
5259 return;
5260 if (isa<ConstrainedFPIntrinsic>(&Call))
5261 return;
5262 // If we get here, the caller doesn't have the strictfp attribute
5263 // but this callsite does. Replace the strictfp attribute with nobuiltin.
5264 Call.removeFnAttr(Attribute::StrictFP);
5265 Call.addFnAttr(Attribute::NoBuiltin);
5266 }
5267};
5268} // namespace
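// Editorial note (not in the original source): inside a caller that itself
// lacks the strictfp attribute, a callsite such as
//   %r = call double @sin(double %x) strictfp
// is rewritten by the visitor above to
//   %r = call double @sin(double %x) nobuiltin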
5269
5270void llvm::UpgradeFunctionAttributes(Function &F) {
5271 // If a function definition doesn't have the strictfp attribute,
5272 // convert any callsite strictfp attributes to nobuiltin.
5273 if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
5274 StrictFPUpgradeVisitor SFPV;
5275 SFPV.visit(F);
5276 }
5277
5278 // Remove all incompatible attributes from the function.
5279 F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
5280 for (auto &Arg : F.args())
5281 Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
5282
5283 // Older versions of LLVM treated an "implicit-section-name" attribute
5284 // similarly to directly setting the section on a Function.
5285 if (Attribute A = F.getFnAttribute("implicit-section-name");
5286 A.isValid() && A.isStringAttribute()) {
5287 F.setSection(A.getValueAsString());
5288 F.removeFnAttr("implicit-section-name");
5289 }
5290}
5291
5292static bool isOldLoopArgument(Metadata *MD) {
5293 auto *T = dyn_cast_or_null<MDTuple>(MD);
5294 if (!T)
5295 return false;
5296 if (T->getNumOperands() < 1)
5297 return false;
5298 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
5299 if (!S)
5300 return false;
5301 return S->getString().starts_with("llvm.vectorizer.");
5302}
5303
5304static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
5305 StringRef OldPrefix = "llvm.vectorizer.";
5306 assert(OldTag.starts_with(OldPrefix) && "Expected old prefix");
5307
5308 if (OldTag == "llvm.vectorizer.unroll")
5309 return MDString::get(C, "llvm.loop.interleave.count");
5310
5311 return MDString::get(
5312 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
5313 .str());
5314}
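// Editorial note (not in the original source): for example,
// !{!"llvm.vectorizer.width", i32 4} becomes
// !{!"llvm.loop.vectorize.width", i32 4}, while the special-cased
// !{!"llvm.vectorizer.unroll", i32 2} maps to
// !{!"llvm.loop.interleave.count", i32 2}.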
5315
5316static Metadata *upgradeLoopArgument(Metadata *MD) {
5317 auto *T = dyn_cast_or_null<MDTuple>(MD);
5318 if (!T)
5319 return MD;
5320 if (T->getNumOperands() < 1)
5321 return MD;
5322 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
5323 if (!OldTag)
5324 return MD;
5325 if (!OldTag->getString().starts_with("llvm.vectorizer."))
5326 return MD;
5327
5328 // This has an old tag. Upgrade it.
5329 SmallVector<Metadata *, 8> Ops;
5330 Ops.reserve(T->getNumOperands());
5331 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
5332 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
5333 Ops.push_back(T->getOperand(I));
5334
5335 return MDTuple::get(T->getContext(), Ops);
5336}
5337
5338MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
5339 auto *T = dyn_cast<MDTuple>(&N);
5340 if (!T)
5341 return &N;
5342
5343 if (none_of(T->operands(), isOldLoopArgument))
5344 return &N;
5345
5346 SmallVector<Metadata *, 8> Ops;
5347 Ops.reserve(T->getNumOperands());
5348 for (Metadata *MD : T->operands())
5349 Ops.push_back(upgradeLoopArgument(MD));
5350
5351 return MDTuple::get(T->getContext(), Ops);
5352}
5353
5354std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
5355 Triple T(TT);
5356 // The only data layout upgrades needed for pre-GCN, SPIR or SPIRV are setting
5357 // the address space of globals to 1. This does not apply to SPIRV Logical.
5358 if (((T.isAMDGPU() && !T.isAMDGCN()) ||
5359 (T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical()))) &&
5360 !DL.contains("-G") && !DL.starts_with("G")) {
5361 return DL.empty() ? std::string("G1") : (DL + "-G1").str();
5362 }
5363
5364 if (T.isRISCV64()) {
5365 // Make i32 a native type for 64-bit RISC-V.
5366 auto I = DL.find("-n64-");
5367 if (I != StringRef::npos)
5368 return (DL.take_front(I) + "-n32:64-" + DL.drop_front(I + 5)).str();
5369 return DL.str();
5370 }
5371
5372 std::string Res = DL.str();
5373 // AMDGCN data layout upgrades.
5374 if (T.isAMDGCN()) {
5375 // Define address spaces for constants.
5376 if (!DL.contains("-G") && !DL.starts_with("G"))
5377 Res.append(Res.empty() ? "G1" : "-G1");
5378
5379 // Add missing non-integral declarations.
5380 // This goes before adding new address spaces to prevent incoherent string
5381 // values.
5382 if (!DL.contains("-ni") && !DL.starts_with("ni"))
5383 Res.append("-ni:7:8:9");
5384 // Update ni:7 to ni:7:8:9.
5385 if (DL.ends_with("ni:7"))
5386 Res.append(":8:9");
5387 if (DL.ends_with("ni:7:8"))
5388 Res.append(":9");
5389
5390 // Add sizing for address spaces 7, 8, and 9 (fat raw buffers, buffer
5391 // resources, and strided buffers). An empty data layout has already been
5392 // upgraded to G1 by now.
5392 if (!DL.contains("-p7") && !DL.starts_with("p7"))
5393 Res.append("-p7:160:256:256:32");
5394 if (!DL.contains("-p8") && !DL.starts_with("p8"))
5395 Res.append("-p8:128:128");
5396 if (!DL.contains("-p9") && !DL.starts_with("p9"))
5397 Res.append("-p9:192:256:256:32");
5398
5399 return Res;
5400 }
5401
5402 if (!T.isX86())
5403 return Res;
5404
5405 // If the datalayout matches the expected format, add pointer size address
5406 // spaces to the datalayout.
5407 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
5408 if (StringRef Ref = Res; !Ref.contains(AddrSpaces)) {
5409 SmallVector<StringRef, 4> Groups;
5410 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
5411 if (R.match(Res, &Groups))
5412 Res = (Groups[1] + AddrSpaces + Groups[3]).str();
5413 }
5414
5415 // i128 values need to be 16-byte-aligned. LLVM already called into libgcc
5416 // for i128 operations prior to this being reflected in the data layout, and
5417 // clang mostly produced LLVM IR that already aligned i128 to 16 byte
5418 // boundaries, so although this is a breaking change, the upgrade is expected
5419 // to fix more IR than it breaks.
5420 // Intel MCU is an exception and uses 4-byte-alignment.
5421 if (!T.isOSIAMCU()) {
5422 std::string I128 = "-i128:128";
5423 if (StringRef Ref = Res; !Ref.contains(I128)) {
5424 SmallVector<StringRef, 4> Groups;
5425 Regex R("^(e(-[mpi][^-]*)*)((-[^mpi][^-]*)*)$");
5426 if (R.match(Res, &Groups))
5427 Res = (Groups[1] + I128 + Groups[3]).str();
5428 }
5429 }
5430
5431 // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
5432 // Raising the alignment is safe because Clang did not produce f80 values in
5433 // the MSVC environment before this upgrade was added.
5434 if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
5435 StringRef Ref = Res;
5436 auto I = Ref.find("-f80:32-");
5437 if (I != StringRef::npos)
5438 Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
5439 }
5440
5441 return Res;
5442}
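// Editorial note (not in the original source): for a common x86-64 layout,
//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
// the two rewrites above yield
//   "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"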
5443
5444void llvm::UpgradeAttributes(AttrBuilder &B) {
5445 StringRef FramePointer;
5446 Attribute A = B.getAttribute("no-frame-pointer-elim");
5447 if (A.isValid()) {
5448 // The value can be "true" or "false".
5449 FramePointer = A.getValueAsString() == "true" ? "all" : "none";
5450 B.removeAttribute("no-frame-pointer-elim");
5451 }
5452 if (B.contains("no-frame-pointer-elim-non-leaf")) {
5453 // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
5454 if (FramePointer != "all")
5455 FramePointer = "non-leaf";
5456 B.removeAttribute("no-frame-pointer-elim-non-leaf");
5457 }
5458 if (!FramePointer.empty())
5459 B.addAttribute("frame-pointer", FramePointer);
5460
5461 A = B.getAttribute("null-pointer-is-valid");
5462 if (A.isValid()) {
5463 // The value can be "true" or "false".
5464 bool NullPointerIsValid = A.getValueAsString() == "true";
5465 B.removeAttribute("null-pointer-is-valid");
5466 if (NullPointerIsValid)
5467 B.addAttribute(Attribute::NullPointerIsValid);
5468 }
5469}
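// Editorial note (not in the original source): "no-frame-pointer-elim"="true"
// becomes "frame-pointer"="all"; "no-frame-pointer-elim-non-leaf" becomes
// "frame-pointer"="non-leaf" unless the former already forced "all"; and
// "null-pointer-is-valid"="true" becomes the enum attribute
// null_pointer_is_valid.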
5470
5471void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
5472 // clang.arc.attachedcall bundles are now required to have an operand.
5473 // If they don't, it's okay to drop them entirely: when there is an operand,
5474 // the "attachedcall" is meaningful and required, but without an operand,
5475 // it's just a marker NOP. Dropping it merely prevents an optimization.
5476 erase_if(Bundles, [&](OperandBundleDef &OBD) {
5477 return OBD.getTag() == "clang.arc.attachedcall" &&
5478 OBD.inputs().empty();
5479 });
5480}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
unsigned Intr
amdgpu AMDGPU Register Bank Select
static Value * upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI, bool ZeroMask, bool IndexForm)
static Metadata * upgradeLoopArgument(Metadata *MD)
static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn, bool CanUpgradeDebugIntrinsicsToRecords)
static Value * upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op, unsigned Shift)
static bool upgradeRetainReleaseMarker(Module &M)
This checks for objc retain release marker which should be upgraded.
static Value * upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm, bool IsSigned)
static Value * upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI)
static Value * upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI, bool IsRotateRight)
static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name)
static MDString * upgradeLoopTag(LLVMContext &C, StringRef OldTag)
static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:88
static Value * upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, Value *Op1, Value *Shift, Value *Passthru, Value *Mask, bool IsVALIGN)
static Value * upgradeAbs(IRBuilder<> &Builder, CallBase &CI)
static Value * emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0, Value *Op1)
static Value * upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI)
static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name, Function *&NewFn)
static Value * applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask)
static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name)
static Value * upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op, unsigned Shift)
static bool isOldLoopArgument(Metadata *MD)
static Value * upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F, IRBuilder<> &Builder)
static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:72
static Value * upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI, Function *F, IRBuilder<> &Builder)
static Value * upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr, Value *Passthru, Value *Mask, bool Aligned)
static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:99
static MDType * unwrapMAVOp(CallBase *CI, unsigned Op)
Helper to unwrap intrinsic call MetadataAsValue operands.
static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F, StringRef Name, Function *&NewFn)
static Value * getX86MaskVec(IRBuilder<> &Builder, Value *Mask, unsigned NumElts)
static Value * emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0, Value *Op1)
static Value * upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI, bool IsShiftRight, bool ZeroMask)
static void rename(GlobalValue *GV)
Definition: AutoUpgrade.cpp:52
static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn)
Definition: AutoUpgrade.cpp:56
static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn)
static cl::opt< bool > DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info", cl::desc("Disable autoupgrade of debug info"))
static Value * upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI, unsigned CC, bool Signed)
static Value * upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI, Intrinsic::ID IID)
static Value * upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI, Intrinsic::ID IID)
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder, CallBase &CI, Value *&Rep)
static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI)
Convert debug intrinsic calls to non-instruction debug records.
static Value * upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned)
static Value * upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data, Value *Mask, bool Aligned)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
@ Default
Definition: DwarfDebug.cpp:87
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define R2(n)
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t High
IntegerType * Int32Ty
LLVMContext & Context
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static const X86InstrFMA3Group Groups[]
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:76
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Class to represent array types.
Definition: DerivedTypes.h:371
Type * getElementType() const
Definition: DerivedTypes.h:384
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
Definition: Instructions.h:881
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:760
@ FAdd
*p = old + v
Definition: Instructions.h:785
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:800
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:804
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1823
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1687
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1678
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1778
unsigned arg_size() const
Definition: InstrTypes.h:1685
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
Definition: InstrTypes.h:1781
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
static ConstantAggregateZero * get(Type *Ty)
Definition: Constants.cpp:1663
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1291
static ConstantAsMetadata * get(Constant *C)
Definition: Metadata.h:528
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2126
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2072
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2112
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:205
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:154
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1356
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1499
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
This class represents an Operation in the Expression.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Diagnostic information for debug metadata version reporting.
Diagnostic information for stripping invalid debug metadata.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
Class to represent function types.
Definition: DerivedTypes.h:103
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
Type * getReturnType() const
Definition: DerivedTypes.h:124
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:164
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:202
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:232
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Function.cpp:403
size_t arg_size() const
Definition: Function.h:851
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:207
Argument * getArg(unsigned i) const
Definition: Function.h:836
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
Definition: IRBuilder.h:461
Value * CreateFSub(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1560
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2472
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:511
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2523
Value * CreateFDiv(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1614
CallInst * CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, Value *Idx, const Twine &Name="")
Create a call to the vector.insert intrinsic.
Definition: IRBuilder.h:1045
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2094
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2460
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition: IRBuilder.h:539
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1807
Value * CreateFAdd(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1533
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2170
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1214
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2516
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Definition: IRBuilder.cpp:578
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2269
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1110
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2033
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:526
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
Definition: IRBuilder.h:476
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2081
IntegerType * getInt16Ty()
Fetch the type representing a 16-bit integer.
Definition: IRBuilder.h:521
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition: IRBuilder.h:1721
Value * CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2277
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1749
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2241
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2127
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1790
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1416
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2021
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2494
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1475
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to the Masked Store intrinsic.
Definition: IRBuilder.cpp:598
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1327
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition: IRBuilder.h:471
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2549
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Definition: IRBuilder.h:1854
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2007
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1497
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:569
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2253
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2196
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1826
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2412
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1456
Value * CreateFPExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2110
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1519
Value * CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2261
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2351
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1587
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1730
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:516
Type * getBFloatTy()
Fetch the type representing a 16-bit brain floating point value.
Definition: IRBuilder.h:549
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2666
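Taken together, the CreateXxx entries above are the surface most callers touch. Below is a minimal sketch (not from this file) chaining a few of them; the helper name emitSquaredReversed and the assumed signature (one float argument, a <4 x float> return) are illustrative only.

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Sketch only: assumes F takes one float and returns <4 x float>.
  void emitSquaredReversed(Function *F) {
    LLVMContext &Ctx = F->getContext();
    IRBuilder<> Builder(BasicBlock::Create(Ctx, "entry", F));
    Value *Scalar = F->getArg(0);                        // the float arg
    Value *Splat = Builder.CreateVectorSplat(4, Scalar); // <4 x float>
    Value *Sq = Builder.CreateFMul(Splat, Splat);
    Value *Rev = Builder.CreateShuffleVector(            // reverse lanes
        Sq, PoisonValue::get(Sq->getType()), ArrayRef<int>{3, 2, 1, 0});
    Builder.CreateRet(Rev);
  }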
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
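As a quick illustration of the InstVisitor pattern above (the class and helper names here are hypothetical):

  #include "llvm/IR/InstVisitor.h"
  using namespace llvm;

  // Count every call-like instruction (calls, invokes, callbrs) by
  // overriding the visitCallBase hook.
  struct CallCounter : InstVisitor<CallCounter> {
    unsigned NumCalls = 0;
    void visitCallBase(CallBase &CB) { ++NumCalls; }
  };

  unsigned countCalls(Function &F) {
    CallCounter C;
    C.visit(F);
    return C.NumCalls;
  }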
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:83
const BasicBlock * getParent() const
Definition: Instruction.h:152
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1636
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Definition: DerivedTypes.h:72
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
LLVMContext & getContext() const
Definition: Metadata.h:1231
A single uniqued string.
Definition: Metadata.h:720
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:600
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1498
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
static MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:103
Root of the metadata hierarchy.
Definition: Metadata.h:62
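A hedged sketch of how the metadata classes above compose; the key string "example.note" and the helper name are made up for illustration.

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Metadata.h"
  using namespace llvm;

  // Build the tuple !{!"example.note", i32 1} and wrap it so it can be
  // passed where a Value is expected (e.g. as an intrinsic argument).
  Value *makeNoteOperand(LLVMContext &Ctx) {
    Metadata *Ops[] = {
        MDString::get(Ctx, "example.note"),
        ConstantAsMetadata::get(
            ConstantInt::get(Type::getInt32Ty(Ctx), 1))};
    MDNode *Node = MDTuple::get(Ctx, Ops);
    return MetadataAsValue::get(Ctx, Node);
  }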
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:115
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:136
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:118
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:150
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:147
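These behaviors govern what happens when two modules carrying the same flag are linked. A small sketch (the flag keys follow real LLVM conventions; the values are illustrative):

  #include "llvm/IR/Metadata.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  void tagModule(Module &M) {
    // Error: linking modules that disagree on this value is rejected.
    M.addModuleFlag(Module::Error, "wchar_size", 4);
    // Max: the larger value wins on a merge; used for the DI version.
    M.addModuleFlag(Module::Max, "Debug Info Version",
                    DEBUG_METADATA_VERSION);
    // Min and Override behave analogously, per the descriptions above.
  }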
bool IsNewDbgInfoFormat
Is this Module using intrinsics to record the position of debugging information, or non-intrinsic records?
Definition: Module.h:219
A tuple of MDNodes.
Definition: Metadata.h:1729
void setOperand(unsigned I, MDNode *New)
Definition: Metadata.cpp:1390
MDNode * getOperand(unsigned i) const
Definition: Metadata.cpp:1382
unsigned getNumOperands() const
Definition: Metadata.cpp:1378
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1447
ArrayRef< InputTy > inputs() const
Definition: InstrTypes.h:1462
StringRef getTag() const
Definition: InstrTypes.h:1470
Class to represent pointers.
Definition: DerivedTypes.h:646
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
Definition: DerivedTypes.h:662
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1827
bool match(StringRef String, SmallVectorImpl< StringRef > *Matches=nullptr, std::string *Error=nullptr) const
matches - Match the regex against a given String.
Definition: Regex.cpp:83
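A minimal Regex::match sketch; the pattern and helper name are hypothetical, not ones used by this file.

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Support/Regex.h"
  using namespace llvm;

  // Capture whatever follows a fixed dotted prefix. Groups[0] is the
  // whole match; Groups[1] is the first parenthesized capture.
  bool getSuffix(StringRef Name, StringRef &Suffix) {
    Regex R("^llvm\\.example\\.(.*)");
    SmallVector<StringRef, 2> Groups;
    if (!R.match(Name, &Groups))
      return false;
    Suffix = Groups[1];
    return true;
  }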
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:586
uint64_t getMinNumElements() const
Get the minimum number of elements in this vector.
Definition: DerivedTypes.h:634
ArrayRef< int > getShuffleMask() const
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc.).
Definition: SmallString.h:26
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:595
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
static constexpr size_t npos
Definition: StringRef.h:52
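The starts_with/drop_front/empty trio above is the usual shape of name-based dispatch; a hedged sketch with a made-up prefix:

  #include "llvm/ADT/StringRef.h"
  using namespace llvm;

  // Peel a known prefix and check that a non-empty suffix remains.
  bool hasExampleSuffix(StringRef Name) {
    if (!Name.starts_with("llvm.example."))
      return false;
    Name = Name.drop_front(StringRef("llvm.example.").size());
    return !Name.empty();
  }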
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & StartsWith(StringLiteral S, T Value)
Definition: StringSwitch.h:83
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
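StringSwitch chains the Case/Cases/StartsWith calls above and ends with Default; a sketch with illustrative mappings:

  #include "llvm/ADT/StringSwitch.h"
  using namespace llvm;

  // Map a name suffix to a small tag; ~0U marks "unrecognized".
  unsigned classifySuffix(StringRef Suffix) {
    return StringSwitch<unsigned>(Suffix)
        .Case("add", 0)
        .Cases("sub", "usub", 1)   // two spellings, one result
        .StartsWith("mul.", 2)     // prefix match
        .Default(~0U);
  }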
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:373
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:342
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
static Type * getBFloatTy(LLVMContext &C)
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
Definition: Type.h:146
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
self_iterator getIterator()
Definition: ilist_node.h:109
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:690
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Decode the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
Definition: Function.cpp:1313
std::optional< Function * > remangleIntrinsicFunction(Function *F)
Definition: Function.cpp:1777
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:1027
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1469
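Intrinsic::getDeclaration is the usual way to materialize one of these declarations; a sketch using the overloaded llvm.fabs intrinsic (the helper name is made up):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  using namespace llvm;

  // Declare llvm.fabs for X's type (fabs is overloaded on its operand
  // type) and emit a call through the builder.
  CallInst *emitFabs(IRBuilder<> &Builder, Value *X) {
    Module *M = Builder.GetInsertBlock()->getModule();
    Function *Fabs =
        Intrinsic::getDeclaration(M, Intrinsic::fabs, {X->getType()});
    return Builder.CreateCall(Fabs, {X});
  }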
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
void UpgradeIntrinsicCall(CallBase *CB, Function *NewFn)
This is the complement to the above, replacing a specific call to an intrinsic function with a call to the specified new function.
void UpgradeSectionAttributes(Module &M)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
void UpgradeInlineAsmString(std::string *AsmStr)
Upgrade a comment in a call to inline asm that represents an ObjC retain/release marker.
bool isValidAtomicOrdering(Int I)
bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn, bool CanUpgradeDebugIntrinsicsToRecords=true)
This is a more granular function that simply checks an intrinsic function for upgrading, and returns true if it requires upgrading.
MDNode * upgradeInstructionLoopAttachment(MDNode &N)
Upgrade the loop attachment metadata node.
void UpgradeAttributes(AttrBuilder &B)
Upgrade attributes that changed format or kind.
void UpgradeCallsToIntrinsic(Function *F)
This is an auto-upgrade hook for any old intrinsic function syntaxes which need to have both the function updated as well as all calls updated to the new function.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:656
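Putting the upgrade hooks together: a sketch of a plausible driver loop (not necessarily the one the bitcode reader actually uses), with make_early_inc_range guarding against functions being renamed or erased mid-walk.

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/AutoUpgrade.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  void upgradeAll(Module &M) {
    for (Function &F : make_early_inc_range(M))
      UpgradeCallsToIntrinsic(&F);  // fixes the function and its calls
    UpgradeModuleFlags(M);
    UpgradeDebugInfo(M);
  }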
bool UpgradeModuleFlags(Module &M)
This checks for module flags which should be upgraded.
Op::Description Desc
void UpgradeOperandBundles(std::vector< OperandBundleDef > &OperandBundles)
Upgrade operand bundles (without knowing about their user instruction).
Constant * UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy)
This is an auto-upgrade for bitcast constant expression between pointers with different address spaces: the constant expression is replaced by a pair of ptrtoint+inttoptr casts.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
std::string UpgradeDataLayoutString(StringRef DL, StringRef Triple)
Upgrade the datalayout string by adding a section for address space pointers.
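UpgradeDataLayoutString returns a fresh string rather than mutating the module, so typical use reapplies it; a hedged sketch:

  #include "llvm/IR/AutoUpgrade.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  void refreshDataLayout(Module &M) {
    std::string DL =
        UpgradeDataLayoutString(M.getDataLayoutStr(), M.getTargetTriple());
    M.setDataLayout(DL);  // accepts a datalayout description string
  }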
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
GlobalVariable * UpgradeGlobalVariable(GlobalVariable *GV)
This checks for global variables which should be upgraded.
unsigned getDebugMetadataVersionFromModule(const Module &M)
Return Debug Info Metadata Version by checking module flags.
Definition: DebugInfo.cpp:928
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool StripDebugInfo(Module &M)
Strip debug info in the module if it exists.
Definition: DebugInfo.cpp:594
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Ref
The access may reference the value stored in memory.
Instruction * UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, Instruction *&Temp)
This is an auto-upgrade for bitcast between pointers with different address spaces: the instruction is replaced by a pair of ptrtoint+inttoptr casts.
@ Dynamic
Denotes mode unknown at compile time.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if, which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
Definition: STLExtras.h:2051
bool UpgradeDebugInfo(Module &M)
Check the debug info version number; if it is outdated, drop the debug info.
void UpgradeFunctionAttributes(Function &F)
Correct any IR that is relying on old function attribute behavior.
MDNode * UpgradeTBAANode(MDNode &TBAANode)
If the given TBAA tag uses the scalar TBAA format, create a new node corresponding to the upgrade to the struct-path aware TBAA format.
void UpgradeARCRuntime(Module &M)
Convert calls to ARC runtime functions to intrinsic calls, and upgrade the old retain/release marker to the new module flag format.
@ DEBUG_METADATA_VERSION
Definition: Metadata.h:52
bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7073
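A sketch of verifyModule as a post-upgrade check; the fatal-error message is illustrative.

  #include "llvm/IR/Module.h"
  #include "llvm/IR/Verifier.h"
  #include "llvm/Support/ErrorHandling.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  void checkUpgraded(const Module &M) {
    bool BrokenDbgInfo = false;
    // Returns true if the module is broken; diagnostics go to errs().
    if (verifyModule(M, &errs(), &BrokenDbgInfo))
      report_fatal_error("module is broken after auto-upgrade");
  }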
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117