Bug Summary

File: build/source/llvm/lib/IR/AutoUpgrade.cpp
Warning: line 3980, column 24
Called C++ object pointer is null
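
The analyzer is reporting that a member function is called through a pointer that can be null on the diagnosed path. As a hedged illustration only (this is not the flagged statement, and the names are hypothetical), the pattern looks like an unchecked dyn_cast<> result being dereferenced:

    // dyn_cast<> returns nullptr when the runtime type does not match, so
    // the ->getElementType() call below is undefined behavior on that path.
    Type *ScalarTy =
        dyn_cast<VectorType>(F->getReturnType())->getElementType();

    // Guarded form; alternatively use cast<>, which asserts on failure.
    if (auto *VT = dyn_cast<VectorType>(F->getReturnType()))
      Type *ScalarTy = VT->getElementType();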

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AutoUpgrade.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/IR -I /build/source/llvm/lib/IR -I include -I /build/source/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -source-date-epoch 1668078801 -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-11-10-135928-647445-1 -x c++ /build/source/llvm/lib/IR/AutoUpgrade.cpp
1//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the auto-upgrade helper functions.
10// This is where deprecated IR intrinsics and other IR features are updated to
11// current specifications.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/IR/AutoUpgrade.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/ADT/Triple.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/DebugInfo.h"
20#include "llvm/IR/DiagnosticInfo.h"
21#include "llvm/IR/Function.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/InstVisitor.h"
24#include "llvm/IR/Instruction.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/IR/IntrinsicsAArch64.h"
28#include "llvm/IR/IntrinsicsARM.h"
29#include "llvm/IR/IntrinsicsX86.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/Module.h"
32#include "llvm/IR/Verifier.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/Regex.h"
35#include <cstring>
36using namespace llvm;
37
38static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
39
40// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
41// changed their type from v4f32 to v2i64.
42static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
43 Function *&NewFn) {
44 // Check whether this is an old version of the function, which received
45 // v4f32 arguments.
46 Type *Arg0Type = F->getFunctionType()->getParamType(0);
47 if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
48 return false;
49
50 // Yes, it's old, replace it with new version.
51 rename(F);
52 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
53 return true;
54}
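// Editorial note (illustrative, not part of the original source): an old
// declaration such as
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// is renamed to @llvm.x86.sse41.ptestc.old, and NewFn is set to the current
// declaration, which takes <2 x i64> operands:
//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)
// Calls to the renamed function are rewritten against NewFn elsewhere.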
55
56// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
57// arguments have changed their type from i32 to i8.
58static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
59 Function *&NewFn) {
60 // Check that the last argument is an i32.
61 Type *LastArgType = F->getFunctionType()->getParamType(
62 F->getFunctionType()->getNumParams() - 1);
63 if (!LastArgType->isIntegerTy(32))
64 return false;
65
66 // Move this function aside and map down.
67 rename(F);
68 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
69 return true;
70}
71
72// Upgrade the declaration of fp compare intrinsics that change return type
73// from scalar to vXi1 mask.
74static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
75 Function *&NewFn) {
76 // Check if the return type is a vector.
77 if (F->getReturnType()->isVectorTy())
78 return false;
79
80 rename(F);
81 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
82 return true;
83}
84
85static bool UpgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
86 Function *&NewFn) {
87 if (F->getReturnType()->getScalarType()->isBFloatTy())
88 return false;
89
90 rename(F);
91 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
92 return true;
93}
94
95static bool UpgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
96 Function *&NewFn) {
97 if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
98 return false;
99
100 rename(F);
101 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
102 return true;
103}
104
105static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
106  // All of the intrinsic matches below should be marked with the LLVM
107  // version that started autoupgrading them. At some point in the future we
108  // would like to use this information to remove upgrade code for some older
109  // intrinsics. It is currently undecided how we will determine that future
110  // point.
111 if (Name == "addcarryx.u32" || // Added in 8.0
112 Name == "addcarryx.u64" || // Added in 8.0
113 Name == "addcarry.u32" || // Added in 8.0
114 Name == "addcarry.u64" || // Added in 8.0
115 Name == "subborrow.u32" || // Added in 8.0
116 Name == "subborrow.u64" || // Added in 8.0
117 Name.startswith("sse2.padds.") || // Added in 8.0
118 Name.startswith("sse2.psubs.") || // Added in 8.0
119 Name.startswith("sse2.paddus.") || // Added in 8.0
120 Name.startswith("sse2.psubus.") || // Added in 8.0
121 Name.startswith("avx2.padds.") || // Added in 8.0
122 Name.startswith("avx2.psubs.") || // Added in 8.0
123 Name.startswith("avx2.paddus.") || // Added in 8.0
124 Name.startswith("avx2.psubus.") || // Added in 8.0
125 Name.startswith("avx512.padds.") || // Added in 8.0
126 Name.startswith("avx512.psubs.") || // Added in 8.0
127 Name.startswith("avx512.mask.padds.") || // Added in 8.0
128 Name.startswith("avx512.mask.psubs.") || // Added in 8.0
129 Name.startswith("avx512.mask.paddus.") || // Added in 8.0
130 Name.startswith("avx512.mask.psubus.") || // Added in 8.0
131      Name == "ssse3.pabs.b.128" || // Added in 6.0
132      Name == "ssse3.pabs.w.128" || // Added in 6.0
133      Name == "ssse3.pabs.d.128" || // Added in 6.0
134 Name.startswith("fma4.vfmadd.s") || // Added in 7.0
135 Name.startswith("fma.vfmadd.") || // Added in 7.0
136 Name.startswith("fma.vfmsub.") || // Added in 7.0
137 Name.startswith("fma.vfmsubadd.") || // Added in 7.0
138 Name.startswith("fma.vfnmadd.") || // Added in 7.0
139 Name.startswith("fma.vfnmsub.") || // Added in 7.0
140 Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
141 Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
142 Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
143 Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
144 Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
145 Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
146 Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
147 Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
148 Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
149 Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
150 Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
151 Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
152 Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
153      Name.startswith("avx512.kunpck") || // Added in 6.0
154 Name.startswith("avx2.pabs.") || // Added in 6.0
155 Name.startswith("avx512.mask.pabs.") || // Added in 6.0
156 Name.startswith("avx512.broadcastm") || // Added in 6.0
157 Name == "sse.sqrt.ss" || // Added in 7.0
158 Name == "sse2.sqrt.sd" || // Added in 7.0
159 Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
160 Name.startswith("avx.sqrt.p") || // Added in 7.0
161 Name.startswith("sse2.sqrt.p") || // Added in 7.0
162 Name.startswith("sse.sqrt.p") || // Added in 7.0
163 Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
164 Name.startswith("sse2.pcmpeq.") || // Added in 3.1
165 Name.startswith("sse2.pcmpgt.") || // Added in 3.1
166 Name.startswith("avx2.pcmpeq.") || // Added in 3.1
167 Name.startswith("avx2.pcmpgt.") || // Added in 3.1
168 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
169 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
170 Name.startswith("avx.vperm2f128.") || // Added in 6.0
171 Name == "avx2.vperm2i128" || // Added in 6.0
172 Name == "sse.add.ss" || // Added in 4.0
173 Name == "sse2.add.sd" || // Added in 4.0
174 Name == "sse.sub.ss" || // Added in 4.0
175 Name == "sse2.sub.sd" || // Added in 4.0
176 Name == "sse.mul.ss" || // Added in 4.0
177 Name == "sse2.mul.sd" || // Added in 4.0
178 Name == "sse.div.ss" || // Added in 4.0
179 Name == "sse2.div.sd" || // Added in 4.0
180 Name == "sse41.pmaxsb" || // Added in 3.9
181 Name == "sse2.pmaxs.w" || // Added in 3.9
182 Name == "sse41.pmaxsd" || // Added in 3.9
183 Name == "sse2.pmaxu.b" || // Added in 3.9
184 Name == "sse41.pmaxuw" || // Added in 3.9
185 Name == "sse41.pmaxud" || // Added in 3.9
186 Name == "sse41.pminsb" || // Added in 3.9
187 Name == "sse2.pmins.w" || // Added in 3.9
188 Name == "sse41.pminsd" || // Added in 3.9
189 Name == "sse2.pminu.b" || // Added in 3.9
190 Name == "sse41.pminuw" || // Added in 3.9
191 Name == "sse41.pminud" || // Added in 3.9
192 Name == "avx512.kand.w" || // Added in 7.0
193 Name == "avx512.kandn.w" || // Added in 7.0
194 Name == "avx512.knot.w" || // Added in 7.0
195 Name == "avx512.kor.w" || // Added in 7.0
196 Name == "avx512.kxor.w" || // Added in 7.0
197 Name == "avx512.kxnor.w" || // Added in 7.0
198 Name == "avx512.kortestc.w" || // Added in 7.0
199 Name == "avx512.kortestz.w" || // Added in 7.0
200 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
201 Name.startswith("avx2.pmax") || // Added in 3.9
202 Name.startswith("avx2.pmin") || // Added in 3.9
203 Name.startswith("avx512.mask.pmax") || // Added in 4.0
204 Name.startswith("avx512.mask.pmin") || // Added in 4.0
205 Name.startswith("avx2.vbroadcast") || // Added in 3.8
206 Name.startswith("avx2.pbroadcast") || // Added in 3.8
207 Name.startswith("avx.vpermil.") || // Added in 3.1
208 Name.startswith("sse2.pshuf") || // Added in 3.9
209 Name.startswith("avx512.pbroadcast") || // Added in 3.9
210 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
211 Name.startswith("avx512.mask.movddup") || // Added in 3.9
212 Name.startswith("avx512.mask.movshdup") || // Added in 3.9
213 Name.startswith("avx512.mask.movsldup") || // Added in 3.9
214 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
215 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
216 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
217 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
218 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
219 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
220 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
221 Name.startswith("avx512.mask.punpckl") || // Added in 3.9
222 Name.startswith("avx512.mask.punpckh") || // Added in 3.9
223 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
224 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
225 Name.startswith("avx512.mask.pand.") || // Added in 3.9
226 Name.startswith("avx512.mask.pandn.") || // Added in 3.9
227 Name.startswith("avx512.mask.por.") || // Added in 3.9
228 Name.startswith("avx512.mask.pxor.") || // Added in 3.9
229 Name.startswith("avx512.mask.and.") || // Added in 3.9
230 Name.startswith("avx512.mask.andn.") || // Added in 3.9
231 Name.startswith("avx512.mask.or.") || // Added in 3.9
232 Name.startswith("avx512.mask.xor.") || // Added in 3.9
233 Name.startswith("avx512.mask.padd.") || // Added in 4.0
234 Name.startswith("avx512.mask.psub.") || // Added in 4.0
235 Name.startswith("avx512.mask.pmull.") || // Added in 4.0
236 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
237 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
238 Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
239 Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
240 Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
241 Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
242 Name == "avx512.mask.vcvtph2ps.128" || // Added in 11.0
243 Name == "avx512.mask.vcvtph2ps.256" || // Added in 11.0
244 Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
245 Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
246 Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
247 Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
248 Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
249 Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
250 Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
251 Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
252 Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
253 Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
254 Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
255 Name == "avx512.cvtusi2sd" || // Added in 7.0
256 Name.startswith("avx512.mask.permvar.") || // Added in 7.0
257 Name == "sse2.pmulu.dq" || // Added in 7.0
258 Name == "sse41.pmuldq" || // Added in 7.0
259 Name == "avx2.pmulu.dq" || // Added in 7.0
260 Name == "avx2.pmul.dq" || // Added in 7.0
261 Name == "avx512.pmulu.dq.512" || // Added in 7.0
262 Name == "avx512.pmul.dq.512" || // Added in 7.0
263 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
264 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
265 Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
266 Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
267 Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
268 Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
269 Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
270 Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
271 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
272 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
273 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
274 Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
275 Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
276 Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
277 Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
278 Name.startswith("avx512.cmp.p") || // Added in 12.0
279 Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
280 Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
281 Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
282 Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
283 Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
284 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
285 Name.startswith("avx512.mask.psll.d") || // Added in 4.0
286 Name.startswith("avx512.mask.psll.q") || // Added in 4.0
287 Name.startswith("avx512.mask.psll.w") || // Added in 4.0
288 Name.startswith("avx512.mask.psra.d") || // Added in 4.0
289 Name.startswith("avx512.mask.psra.q") || // Added in 4.0
290 Name.startswith("avx512.mask.psra.w") || // Added in 4.0
291 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
292 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
293 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
294 Name.startswith("avx512.mask.pslli") || // Added in 4.0
295 Name.startswith("avx512.mask.psrai") || // Added in 4.0
296 Name.startswith("avx512.mask.psrli") || // Added in 4.0
297 Name.startswith("avx512.mask.psllv") || // Added in 4.0
298 Name.startswith("avx512.mask.psrav") || // Added in 4.0
299 Name.startswith("avx512.mask.psrlv") || // Added in 4.0
300 Name.startswith("sse41.pmovsx") || // Added in 3.8
301 Name.startswith("sse41.pmovzx") || // Added in 3.9
302 Name.startswith("avx2.pmovsx") || // Added in 3.9
303 Name.startswith("avx2.pmovzx") || // Added in 3.9
304 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
305 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
306 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
307 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
308 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
309 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
310 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
311 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
312 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
313 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
314 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
315 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
316 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
317 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
318 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
319 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
320 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
321 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
322 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
323 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
324 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
325 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
326 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
327 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
328 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
329 Name.startswith("avx512.vpshld.") || // Added in 8.0
330 Name.startswith("avx512.vpshrd.") || // Added in 8.0
331 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
332 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
333 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
334 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
335 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
336 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
337 Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
338 Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
339 Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
340 Name.startswith("avx512.mask.conflict.") || // Added in 9.0
341 Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
342 Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
343 Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
344 Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
345 Name == "sse.cvtsi2ss" || // Added in 7.0
346 Name == "sse.cvtsi642ss" || // Added in 7.0
347 Name == "sse2.cvtsi2sd" || // Added in 7.0
348 Name == "sse2.cvtsi642sd" || // Added in 7.0
349 Name == "sse2.cvtss2sd" || // Added in 7.0
350 Name == "sse2.cvtdq2pd" || // Added in 3.9
351 Name == "sse2.cvtdq2ps" || // Added in 7.0
352 Name == "sse2.cvtps2pd" || // Added in 3.9
353 Name == "avx.cvtdq2.pd.256" || // Added in 3.9
354 Name == "avx.cvtdq2.ps.256" || // Added in 7.0
355 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
356 Name.startswith("vcvtph2ps.") || // Added in 11.0
357 Name.startswith("avx.vinsertf128.") || // Added in 3.7
358 Name == "avx2.vinserti128" || // Added in 3.7
359 Name.startswith("avx512.mask.insert") || // Added in 4.0
360 Name.startswith("avx.vextractf128.") || // Added in 3.7
361 Name == "avx2.vextracti128" || // Added in 3.7
362 Name.startswith("avx512.mask.vextract") || // Added in 4.0
363 Name.startswith("sse4a.movnt.") || // Added in 3.9
364 Name.startswith("avx.movnt.") || // Added in 3.2
365 Name.startswith("avx512.storent.") || // Added in 3.9
366 Name == "sse41.movntdqa" || // Added in 5.0
367 Name == "avx2.movntdqa" || // Added in 5.0
368 Name == "avx512.movntdqa" || // Added in 5.0
369 Name == "sse2.storel.dq" || // Added in 3.9
370 Name.startswith("sse.storeu.") || // Added in 3.9
371 Name.startswith("sse2.storeu.") || // Added in 3.9
372 Name.startswith("avx.storeu.") || // Added in 3.9
373 Name.startswith("avx512.mask.storeu.") || // Added in 3.9
374 Name.startswith("avx512.mask.store.p") || // Added in 3.9
375 Name.startswith("avx512.mask.store.b.") || // Added in 3.9
376 Name.startswith("avx512.mask.store.w.") || // Added in 3.9
377 Name.startswith("avx512.mask.store.d.") || // Added in 3.9
378 Name.startswith("avx512.mask.store.q.") || // Added in 3.9
379 Name == "avx512.mask.store.ss" || // Added in 7.0
380 Name.startswith("avx512.mask.loadu.") || // Added in 3.9
381 Name.startswith("avx512.mask.load.") || // Added in 3.9
382 Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
383 Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
384 Name.startswith("avx512.mask.expand.b") || // Added in 9.0
385 Name.startswith("avx512.mask.expand.w") || // Added in 9.0
386 Name.startswith("avx512.mask.expand.d") || // Added in 9.0
387 Name.startswith("avx512.mask.expand.q") || // Added in 9.0
388 Name.startswith("avx512.mask.expand.p") || // Added in 9.0
389 Name.startswith("avx512.mask.compress.b") || // Added in 9.0
390 Name.startswith("avx512.mask.compress.w") || // Added in 9.0
391 Name.startswith("avx512.mask.compress.d") || // Added in 9.0
392 Name.startswith("avx512.mask.compress.q") || // Added in 9.0
393 Name.startswith("avx512.mask.compress.p") || // Added in 9.0
394 Name == "sse42.crc32.64.8" || // Added in 3.4
395 Name.startswith("avx.vbroadcast.s") || // Added in 3.5
396 Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
397 Name.startswith("avx512.mask.palignr.") || // Added in 3.9
398 Name.startswith("avx512.mask.valign.") || // Added in 4.0
399 Name.startswith("sse2.psll.dq") || // Added in 3.7
400 Name.startswith("sse2.psrl.dq") || // Added in 3.7
401 Name.startswith("avx2.psll.dq") || // Added in 3.7
402 Name.startswith("avx2.psrl.dq") || // Added in 3.7
403 Name.startswith("avx512.psll.dq") || // Added in 3.9
404 Name.startswith("avx512.psrl.dq") || // Added in 3.9
405 Name == "sse41.pblendw" || // Added in 3.7
406 Name.startswith("sse41.blendp") || // Added in 3.7
407 Name.startswith("avx.blend.p") || // Added in 3.7
408 Name == "avx2.pblendw" || // Added in 3.7
409 Name.startswith("avx2.pblendd.") || // Added in 3.7
410 Name.startswith("avx.vbroadcastf128") || // Added in 4.0
411 Name == "avx2.vbroadcasti128" || // Added in 3.7
412 Name.startswith("avx512.mask.broadcastf32x4.") || // Added in 6.0
413 Name.startswith("avx512.mask.broadcastf64x2.") || // Added in 6.0
414 Name.startswith("avx512.mask.broadcastf32x8.") || // Added in 6.0
415 Name.startswith("avx512.mask.broadcastf64x4.") || // Added in 6.0
416 Name.startswith("avx512.mask.broadcasti32x4.") || // Added in 6.0
417 Name.startswith("avx512.mask.broadcasti64x2.") || // Added in 6.0
418 Name.startswith("avx512.mask.broadcasti32x8.") || // Added in 6.0
419 Name.startswith("avx512.mask.broadcasti64x4.") || // Added in 6.0
420 Name == "xop.vpcmov" || // Added in 3.8
421 Name == "xop.vpcmov.256" || // Added in 5.0
422 Name.startswith("avx512.mask.move.s") || // Added in 4.0
423 Name.startswith("avx512.cvtmask2") || // Added in 5.0
424 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
425 Name.startswith("xop.vprot") || // Added in 8.0
426 Name.startswith("avx512.prol") || // Added in 8.0
427 Name.startswith("avx512.pror") || // Added in 8.0
428 Name.startswith("avx512.mask.prorv.") || // Added in 8.0
429 Name.startswith("avx512.mask.pror.") || // Added in 8.0
430 Name.startswith("avx512.mask.prolv.") || // Added in 8.0
431 Name.startswith("avx512.mask.prol.") || // Added in 8.0
432      Name.startswith("avx512.ptestm") || // Added in 6.0
433      Name.startswith("avx512.ptestnm") || // Added in 6.0
434 Name.startswith("avx512.mask.pavg")) // Added in 6.0
435 return true;
436
437 return false;
438}
439
440static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
441 Function *&NewFn) {
442 // Only handle intrinsics that start with "x86.".
443 if (!Name.startswith("x86."))
444 return false;
445 // Remove "x86." prefix.
446 Name = Name.substr(4);
447
448 if (ShouldUpgradeX86Intrinsic(F, Name)) {
449 NewFn = nullptr;
450 return true;
451 }
452
453 if (Name == "rdtscp") { // Added in 8.0
454 // If this intrinsic has 0 operands, it's the new version.
455 if (F->getFunctionType()->getNumParams() == 0)
456 return false;
457
458 rename(F);
459 NewFn = Intrinsic::getDeclaration(F->getParent(),
460 Intrinsic::x86_rdtscp);
461 return true;
462 }
463
464 // SSE4.1 ptest functions may have an old signature.
465 if (Name.startswith("sse41.ptest")) { // Added in 3.2
466 if (Name.substr(11) == "c")
467 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
468 if (Name.substr(11) == "z")
469 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
470 if (Name.substr(11) == "nzc")
471 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
472 }
473 // Several blend and other instructions with masks used the wrong number of
474 // bits.
475 if (Name == "sse41.insertps") // Added in 3.6
476 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
477 NewFn);
478 if (Name == "sse41.dppd") // Added in 3.6
479 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
480 NewFn);
481 if (Name == "sse41.dpps") // Added in 3.6
482 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
483 NewFn);
484 if (Name == "sse41.mpsadbw") // Added in 3.6
485 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
486 NewFn);
487 if (Name == "avx.dp.ps.256") // Added in 3.6
488 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
489 NewFn);
490 if (Name == "avx2.mpsadbw") // Added in 3.6
491 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
492 NewFn);
493 if (Name == "avx512.mask.cmp.pd.128") // Added in 7.0
494 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_128,
495 NewFn);
496 if (Name == "avx512.mask.cmp.pd.256") // Added in 7.0
497 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_256,
498 NewFn);
499 if (Name == "avx512.mask.cmp.pd.512") // Added in 7.0
500 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_512,
501 NewFn);
502 if (Name == "avx512.mask.cmp.ps.128") // Added in 7.0
503 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_128,
504 NewFn);
505 if (Name == "avx512.mask.cmp.ps.256") // Added in 7.0
506 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_256,
507 NewFn);
508 if (Name == "avx512.mask.cmp.ps.512") // Added in 7.0
509 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_512,
510 NewFn);
511 if (Name == "avx512bf16.cvtne2ps2bf16.128") // Added in 9.0
512 return UpgradeX86BF16Intrinsic(
513 F, Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128, NewFn);
514 if (Name == "avx512bf16.cvtne2ps2bf16.256") // Added in 9.0
515 return UpgradeX86BF16Intrinsic(
516 F, Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256, NewFn);
517 if (Name == "avx512bf16.cvtne2ps2bf16.512") // Added in 9.0
518 return UpgradeX86BF16Intrinsic(
519 F, Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512, NewFn);
520 if (Name == "avx512bf16.mask.cvtneps2bf16.128") // Added in 9.0
521 return UpgradeX86BF16Intrinsic(
522 F, Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128, NewFn);
523 if (Name == "avx512bf16.cvtneps2bf16.256") // Added in 9.0
524 return UpgradeX86BF16Intrinsic(
525 F, Intrinsic::x86_avx512bf16_cvtneps2bf16_256, NewFn);
526 if (Name == "avx512bf16.cvtneps2bf16.512") // Added in 9.0
527 return UpgradeX86BF16Intrinsic(
528 F, Intrinsic::x86_avx512bf16_cvtneps2bf16_512, NewFn);
529 if (Name == "avx512bf16.dpbf16ps.128") // Added in 9.0
530 return UpgradeX86BF16DPIntrinsic(
531 F, Intrinsic::x86_avx512bf16_dpbf16ps_128, NewFn);
532 if (Name == "avx512bf16.dpbf16ps.256") // Added in 9.0
533 return UpgradeX86BF16DPIntrinsic(
534 F, Intrinsic::x86_avx512bf16_dpbf16ps_256, NewFn);
535 if (Name == "avx512bf16.dpbf16ps.512") // Added in 9.0
536 return UpgradeX86BF16DPIntrinsic(
537 F, Intrinsic::x86_avx512bf16_dpbf16ps_512, NewFn);
538
539 // frcz.ss/sd may need to have an argument dropped. Added in 3.2
540 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
541 rename(F);
542 NewFn = Intrinsic::getDeclaration(F->getParent(),
543 Intrinsic::x86_xop_vfrcz_ss);
544 return true;
545 }
546 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
547 rename(F);
548 NewFn = Intrinsic::getDeclaration(F->getParent(),
549 Intrinsic::x86_xop_vfrcz_sd);
550 return true;
551 }
552 // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
553 if (Name.startswith("xop.vpermil2")) { // Added in 3.9
554 auto Idx = F->getFunctionType()->getParamType(2);
555 if (Idx->isFPOrFPVectorTy()) {
556 rename(F);
557 unsigned IdxSize = Idx->getPrimitiveSizeInBits();
558 unsigned EltSize = Idx->getScalarSizeInBits();
559 Intrinsic::ID Permil2ID;
560 if (EltSize == 64 && IdxSize == 128)
561 Permil2ID = Intrinsic::x86_xop_vpermil2pd;
562 else if (EltSize == 32 && IdxSize == 128)
563 Permil2ID = Intrinsic::x86_xop_vpermil2ps;
564 else if (EltSize == 64 && IdxSize == 256)
565 Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
566 else
567 Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
568 NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
569 return true;
570 }
571 }
572
573 if (Name == "seh.recoverfp") {
574 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
575 return true;
576 }
577
578 return false;
579}
580
581static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
582  assert(F && "Illegal to upgrade a non-existent Function.");
583
584  // Quickly eliminate it if it's not a candidate.
585 StringRef Name = F->getName();
586 if (Name.size() <= 7 || !Name.startswith("llvm."))
587 return false;
588 Name = Name.substr(5); // Strip off "llvm."
589
590 switch (Name[0]) {
591 default: break;
592 case 'a': {
593 if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
594 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
595 F->arg_begin()->getType());
596 return true;
597 }
598 if (Name.startswith("aarch64.neon.frintn")) {
599 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::roundeven,
600 F->arg_begin()->getType());
601 return true;
602 }
603 if (Name.startswith("aarch64.neon.rbit")) {
604 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
605 F->arg_begin()->getType());
606 return true;
607 }
608 static const Regex LdRegex("^aarch64\\.sve\\.ld[234](.nxv[a-z0-9]+|$)");
609 if (LdRegex.match(Name)) {
610 Type *ScalarTy =
611 dyn_cast<VectorType>(F->getReturnType())->getElementType();
612 ElementCount EC =
613 dyn_cast<VectorType>(F->arg_begin()->getType())->getElementCount();
614 Type *Ty = VectorType::get(ScalarTy, EC);
615 Intrinsic::ID ID =
616 StringSwitch<Intrinsic::ID>(Name)
617 .StartsWith("aarch64.sve.ld2", Intrinsic::aarch64_sve_ld2_sret)
618 .StartsWith("aarch64.sve.ld3", Intrinsic::aarch64_sve_ld3_sret)
619 .StartsWith("aarch64.sve.ld4", Intrinsic::aarch64_sve_ld4_sret)
620 .Default(Intrinsic::not_intrinsic);
621 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Ty);
622 return true;
623 }
624 if (Name.startswith("aarch64.sve.tuple.get")) {
625 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
626 NewFn = Intrinsic::getDeclaration(F->getParent(),
627 Intrinsic::vector_extract, Tys);
628 return true;
629 }
630 if (Name.startswith("aarch64.sve.tuple.set")) {
631 auto Args = F->getFunctionType()->params();
632 Type *Tys[] = {Args[0], Args[2], Args[1]};
633 NewFn = Intrinsic::getDeclaration(F->getParent(),
634 Intrinsic::vector_insert, Tys);
635 return true;
636 }
637 static const Regex CreateTupleRegex(
638 "^aarch64\\.sve\\.tuple\\.create[234](.nxv[a-z0-9]+|$)");
639 if (CreateTupleRegex.match(Name)) {
640 auto Args = F->getFunctionType()->params();
641 Type *Tys[] = {F->getReturnType(), Args[1]};
642 NewFn = Intrinsic::getDeclaration(F->getParent(),
643 Intrinsic::vector_insert, Tys);
644 return true;
645 }
646 if (Name.startswith("arm.neon.vclz")) {
647 Type* args[2] = {
648 F->arg_begin()->getType(),
649 Type::getInt1Ty(F->getContext())
650 };
651 // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
652 // the end of the name. Change name from llvm.arm.neon.vclz.* to
653 // llvm.ctlz.*
654 FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
655 NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
656 "llvm.ctlz." + Name.substr(14), F->getParent());
657 return true;
658 }
659 if (Name.startswith("arm.neon.vcnt")) {
660 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
661 F->arg_begin()->getType());
662 return true;
663 }
664 static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
665 if (vstRegex.match(Name)) {
666 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
667 Intrinsic::arm_neon_vst2,
668 Intrinsic::arm_neon_vst3,
669 Intrinsic::arm_neon_vst4};
670
671 static const Intrinsic::ID StoreLaneInts[] = {
672 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
673 Intrinsic::arm_neon_vst4lane
674 };
675
676 auto fArgs = F->getFunctionType()->params();
677 Type *Tys[] = {fArgs[0], fArgs[1]};
678 if (!Name.contains("lane"))
679 NewFn = Intrinsic::getDeclaration(F->getParent(),
680 StoreInts[fArgs.size() - 3], Tys);
681 else
682 NewFn = Intrinsic::getDeclaration(F->getParent(),
683 StoreLaneInts[fArgs.size() - 5], Tys);
684 return true;
685 }
686 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
687 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
688 return true;
689 }
690 if (Name.startswith("arm.neon.vqadds.")) {
691 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
692 F->arg_begin()->getType());
693 return true;
694 }
695 if (Name.startswith("arm.neon.vqaddu.")) {
696 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
697 F->arg_begin()->getType());
698 return true;
699 }
700 if (Name.startswith("arm.neon.vqsubs.")) {
701 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
702 F->arg_begin()->getType());
703 return true;
704 }
705 if (Name.startswith("arm.neon.vqsubu.")) {
706 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
707 F->arg_begin()->getType());
708 return true;
709 }
710 if (Name.startswith("aarch64.neon.addp")) {
711 if (F->arg_size() != 2)
712 break; // Invalid IR.
713 VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
714 if (Ty && Ty->getElementType()->isFloatingPointTy()) {
715 NewFn = Intrinsic::getDeclaration(F->getParent(),
716 Intrinsic::aarch64_neon_faddp, Ty);
717 return true;
718 }
719 }
720
721    // Changed in 12.0: bfdot accepts v4bf16 and v8bf16 instead of v8i8 and
722    // v16i8, respectively.
723 if ((Name.startswith("arm.neon.bfdot.") ||
724 Name.startswith("aarch64.neon.bfdot.")) &&
725 Name.endswith("i8")) {
726 Intrinsic::ID IID =
727 StringSwitch<Intrinsic::ID>(Name)
728 .Cases("arm.neon.bfdot.v2f32.v8i8",
729 "arm.neon.bfdot.v4f32.v16i8",
730 Intrinsic::arm_neon_bfdot)
731 .Cases("aarch64.neon.bfdot.v2f32.v8i8",
732 "aarch64.neon.bfdot.v4f32.v16i8",
733 Intrinsic::aarch64_neon_bfdot)
734 .Default(Intrinsic::not_intrinsic);
735 if (IID == Intrinsic::not_intrinsic)
736 break;
737
738 size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
739      assert((OperandWidth == 64 || OperandWidth == 128) &&
740             "Unexpected operand width");
741 LLVMContext &Ctx = F->getParent()->getContext();
742 std::array<Type *, 2> Tys {{
743 F->getReturnType(),
744 FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)
745 }};
746 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
747 return true;
748 }
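// Editorial note (illustrative, not part of the original source): for a
// <2 x float> return type, OperandWidth is 64, so the remangled declaration
// becomes
//   declare <2 x float> @llvm.aarch64.neon.bfdot.v2f32.v4bf16(
//       <2 x float>, <4 x bfloat>, <4 x bfloat>)
// replacing the old variant whose last two operands were <8 x i8>.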
749
750 // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic anymore
751 // and accept v8bf16 instead of v16i8
752 if ((Name.startswith("arm.neon.bfm") ||
753 Name.startswith("aarch64.neon.bfm")) &&
754 Name.endswith(".v4f32.v16i8")) {
755 Intrinsic::ID IID =
756 StringSwitch<Intrinsic::ID>(Name)
757 .Case("arm.neon.bfmmla.v4f32.v16i8",
758 Intrinsic::arm_neon_bfmmla)
759 .Case("arm.neon.bfmlalb.v4f32.v16i8",
760 Intrinsic::arm_neon_bfmlalb)
761 .Case("arm.neon.bfmlalt.v4f32.v16i8",
762 Intrinsic::arm_neon_bfmlalt)
763 .Case("aarch64.neon.bfmmla.v4f32.v16i8",
764 Intrinsic::aarch64_neon_bfmmla)
765 .Case("aarch64.neon.bfmlalb.v4f32.v16i8",
766 Intrinsic::aarch64_neon_bfmlalb)
767 .Case("aarch64.neon.bfmlalt.v4f32.v16i8",
768 Intrinsic::aarch64_neon_bfmlalt)
769 .Default(Intrinsic::not_intrinsic);
770 if (IID == Intrinsic::not_intrinsic)
771 break;
772
773 std::array<Type *, 0> Tys;
774 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
775 return true;
776 }
777
778 if (Name == "arm.mve.vctp64" &&
779 cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
780 // A vctp64 returning a v4i1 is converted to return a v2i1. Rename the
781 // function and deal with it below in UpgradeIntrinsicCall.
782 rename(F);
783 return true;
784 }
785    // These too are changed to accept a v2i1 instead of the old v4i1.
786 if (Name == "arm.mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
787 Name == "arm.mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
788 Name == "arm.mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
789 Name == "arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
790 Name == "arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
791 Name == "arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
792 Name == "arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
793 Name == "arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
794 Name == "arm.cde.vcx1q.predicated.v2i64.v4i1" ||
795 Name == "arm.cde.vcx1qa.predicated.v2i64.v4i1" ||
796 Name == "arm.cde.vcx2q.predicated.v2i64.v4i1" ||
797 Name == "arm.cde.vcx2qa.predicated.v2i64.v4i1" ||
798 Name == "arm.cde.vcx3q.predicated.v2i64.v4i1" ||
799 Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1")
800 return true;
801
802 if (Name == "amdgcn.alignbit") {
803      // Target-specific intrinsic became redundant.
804 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
805 {F->getReturnType()});
806 return true;
807 }
808
809 break;
810 }
811
812 case 'c': {
813 if (Name.startswith("ctlz.") && F->arg_size() == 1) {
814 rename(F);
815 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
816 F->arg_begin()->getType());
817 return true;
818 }
819 if (Name.startswith("cttz.") && F->arg_size() == 1) {
820 rename(F);
821 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
822 F->arg_begin()->getType());
823 return true;
824 }
825 break;
826 }
827 case 'd': {
828 if (Name == "dbg.value" && F->arg_size() == 4) {
829 rename(F);
830 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
831 return true;
832 }
833 break;
834 }
835 case 'e': {
836 if (Name.startswith("experimental.vector.extract.")) {
837 rename(F);
838 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
839 NewFn = Intrinsic::getDeclaration(F->getParent(),
840 Intrinsic::vector_extract, Tys);
841 return true;
842 }
843
844 if (Name.startswith("experimental.vector.insert.")) {
845 rename(F);
846 auto Args = F->getFunctionType()->params();
847 Type *Tys[] = {Args[0], Args[1]};
848 NewFn = Intrinsic::getDeclaration(F->getParent(),
849 Intrinsic::vector_insert, Tys);
850 return true;
851 }
852
853 SmallVector<StringRef, 2> Groups;
854 static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+");
855 if (R.match(Name, &Groups)) {
856 Intrinsic::ID ID;
857 ID = StringSwitch<Intrinsic::ID>(Groups[1])
858 .Case("add", Intrinsic::vector_reduce_add)
859 .Case("mul", Intrinsic::vector_reduce_mul)
860 .Case("and", Intrinsic::vector_reduce_and)
861 .Case("or", Intrinsic::vector_reduce_or)
862 .Case("xor", Intrinsic::vector_reduce_xor)
863 .Case("smax", Intrinsic::vector_reduce_smax)
864 .Case("smin", Intrinsic::vector_reduce_smin)
865 .Case("umax", Intrinsic::vector_reduce_umax)
866 .Case("umin", Intrinsic::vector_reduce_umin)
867 .Case("fmax", Intrinsic::vector_reduce_fmax)
868 .Case("fmin", Intrinsic::vector_reduce_fmin)
869 .Default(Intrinsic::not_intrinsic);
870 if (ID != Intrinsic::not_intrinsic) {
871 rename(F);
872 auto Args = F->getFunctionType()->params();
873 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, {Args[0]});
874 return true;
875 }
876 }
877 static const Regex R2(
878 "^experimental.vector.reduce.v2.([a-z]+)\\.[fi][0-9]+");
879 Groups.clear();
880 if (R2.match(Name, &Groups)) {
881 Intrinsic::ID ID = Intrinsic::not_intrinsic;
882 if (Groups[1] == "fadd")
883 ID = Intrinsic::vector_reduce_fadd;
884 if (Groups[1] == "fmul")
885 ID = Intrinsic::vector_reduce_fmul;
886 if (ID != Intrinsic::not_intrinsic) {
887 rename(F);
888 auto Args = F->getFunctionType()->params();
889 Type *Tys[] = {Args[1]};
890 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
891 return true;
892 }
893 }
894 break;
895 }
896 case 'i':
897 case 'l': {
898 bool IsLifetimeStart = Name.startswith("lifetime.start");
899 if (IsLifetimeStart || Name.startswith("invariant.start")) {
900 Intrinsic::ID ID = IsLifetimeStart ?
901 Intrinsic::lifetime_start : Intrinsic::invariant_start;
902 auto Args = F->getFunctionType()->params();
903 Type* ObjectPtr[1] = {Args[1]};
904 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
905 rename(F);
906 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
907 return true;
908 }
909 }
910
911 bool IsLifetimeEnd = Name.startswith("lifetime.end");
912 if (IsLifetimeEnd || Name.startswith("invariant.end")) {
913 Intrinsic::ID ID = IsLifetimeEnd ?
914 Intrinsic::lifetime_end : Intrinsic::invariant_end;
915
916 auto Args = F->getFunctionType()->params();
917 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
918 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
919 rename(F);
920 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
921 return true;
922 }
923 }
924 if (Name.startswith("invariant.group.barrier")) {
925 // Rename invariant.group.barrier to launder.invariant.group
926 auto Args = F->getFunctionType()->params();
927 Type* ObjectPtr[1] = {Args[0]};
928 rename(F);
929 NewFn = Intrinsic::getDeclaration(F->getParent(),
930 Intrinsic::launder_invariant_group, ObjectPtr);
931 return true;
932
933 }
934
935 break;
936 }
937 case 'm': {
938 if (Name.startswith("masked.load.")) {
939 Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
940 if (F->getName() !=
941 Intrinsic::getName(Intrinsic::masked_load, Tys, F->getParent())) {
942 rename(F);
943 NewFn = Intrinsic::getDeclaration(F->getParent(),
944 Intrinsic::masked_load,
945 Tys);
946 return true;
947 }
948 }
949 if (Name.startswith("masked.store.")) {
950 auto Args = F->getFunctionType()->params();
951 Type *Tys[] = { Args[0], Args[1] };
952 if (F->getName() !=
953 Intrinsic::getName(Intrinsic::masked_store, Tys, F->getParent())) {
954 rename(F);
955 NewFn = Intrinsic::getDeclaration(F->getParent(),
956 Intrinsic::masked_store,
957 Tys);
958 return true;
959 }
960 }
961    // Rename gather/scatter intrinsics with no address space overloading
962    // to the new overload, which includes an address space.
963 if (Name.startswith("masked.gather.")) {
964 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
965 if (F->getName() !=
966 Intrinsic::getName(Intrinsic::masked_gather, Tys, F->getParent())) {
967 rename(F);
968 NewFn = Intrinsic::getDeclaration(F->getParent(),
969 Intrinsic::masked_gather, Tys);
970 return true;
971 }
972 }
973 if (Name.startswith("masked.scatter.")) {
974 auto Args = F->getFunctionType()->params();
975 Type *Tys[] = {Args[0], Args[1]};
976 if (F->getName() !=
977 Intrinsic::getName(Intrinsic::masked_scatter, Tys, F->getParent())) {
978 rename(F);
979 NewFn = Intrinsic::getDeclaration(F->getParent(),
980 Intrinsic::masked_scatter, Tys);
981 return true;
982 }
983 }
984    // Update the memory intrinsics (memcpy/memmove/memset) that had an
985    // alignment parameter to embed the alignment as an attribute of the
986    // pointer args.
987 if (Name.startswith("memcpy.") && F->arg_size() == 5) {
988 rename(F);
989 // Get the types of dest, src, and len
990 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
991 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
992 ParamTypes);
993 return true;
994 }
995 if (Name.startswith("memmove.") && F->arg_size() == 5) {
996 rename(F);
997 // Get the types of dest, src, and len
998 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
999 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
1000 ParamTypes);
1001 return true;
1002 }
1003 if (Name.startswith("memset.") && F->arg_size() == 5) {
1004 rename(F);
1005 // Get the types of dest, and len
1006 const auto *FT = F->getFunctionType();
1007 Type *ParamTypes[2] = {
1008 FT->getParamType(0), // Dest
1009 FT->getParamType(2) // len
1010 };
1011 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
1012 ParamTypes);
1013 return true;
1014 }
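// Editorial note (illustrative, not part of the original source): the net
// effect of this upgrade is that the old five-operand form, e.g.
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 4, i1 false)
// becomes the four-operand form with the alignment carried as attributes:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 %n, i1 false)
// The call operands themselves are rewritten later, in UpgradeIntrinsicCall.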
1015 break;
1016 }
1017 case 'n': {
1018 if (Name.startswith("nvvm.")) {
1019 Name = Name.substr(5);
1020
1021 // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
1022 Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
1023 .Cases("brev32", "brev64", Intrinsic::bitreverse)
1024 .Case("clz.i", Intrinsic::ctlz)
1025 .Case("popc.i", Intrinsic::ctpop)
1026 .Default(Intrinsic::not_intrinsic);
1027 if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
1028 NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
1029 {F->getReturnType()});
1030 return true;
1031 }
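// Editorial note (illustrative, not part of the original source): e.g.
// @llvm.nvvm.brev32, declared as i32 (i32), maps to the generic
// @llvm.bitreverse.i32, overloaded here on the return type.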
1032
1033 // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
1034 // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
1035 //
1036 // TODO: We could add lohi.i2d.
1037 bool Expand = StringSwitch<bool>(Name)
1038 .Cases("abs.i", "abs.ll", true)
1039 .Cases("clz.ll", "popc.ll", "h2f", true)
1040 .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
1041 .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
1042 .StartsWith("atomic.load.add.f32.p", true)
1043 .StartsWith("atomic.load.add.f64.p", true)
1044 .Default(false);
1045 if (Expand) {
1046 NewFn = nullptr;
1047 return true;
1048 }
1049 }
1050 break;
1051 }
1052 case 'o':
1053 // We only need to change the name to match the mangling including the
1054 // address space.
1055 if (Name.startswith("objectsize.")) {
1056 Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
1057 if (F->arg_size() == 2 || F->arg_size() == 3 ||
1058 F->getName() !=
1059 Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
1060 rename(F);
1061 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
1062 Tys);
1063 return true;
1064 }
1065 }
1066 break;
1067
1068 case 'p':
1069 if (Name == "prefetch") {
1070 // Handle address space overloading.
1071 Type *Tys[] = {F->arg_begin()->getType()};
1072 if (F->getName() !=
1073 Intrinsic::getName(Intrinsic::prefetch, Tys, F->getParent())) {
1074 rename(F);
1075 NewFn =
1076 Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
1077 return true;
1078 }
1079 } else if (Name.startswith("ptr.annotation.") && F->arg_size() == 4) {
1080 rename(F);
1081 NewFn = Intrinsic::getDeclaration(F->getParent(),
1082 Intrinsic::ptr_annotation,
1083 F->arg_begin()->getType());
1084 return true;
1085 }
1086 break;
1087
1088 case 's':
1089 if (Name == "stackprotectorcheck") {
1090 NewFn = nullptr;
1091 return true;
1092 }
1093 break;
1094
1095 case 'v': {
1096 if (Name == "var.annotation" && F->arg_size() == 4) {
1097 rename(F);
1098 NewFn = Intrinsic::getDeclaration(F->getParent(),
1099 Intrinsic::var_annotation);
1100 return true;
1101 }
1102 break;
1103 }
1104
1105 case 'x':
1106 if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
1107 return true;
1108 }
1109
1110 auto *ST = dyn_cast<StructType>(F->getReturnType());
1111 if (ST && (!ST->isLiteral() || ST->isPacked())) {
1112 // Replace return type with literal non-packed struct. Only do this for
1113 // intrinsics declared to return a struct, not for intrinsics with
1114 // overloaded return type, in which case the exact struct type will be
1115 // mangled into the name.
1116 SmallVector<Intrinsic::IITDescriptor> Desc;
1117 Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
1118 if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
1119 auto *FT = F->getFunctionType();
1120 auto *NewST = StructType::get(ST->getContext(), ST->elements());
1121 auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
1122 std::string Name = F->getName().str();
1123 rename(F);
1124 NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
1125 Name, F->getParent());
1126
1127 // The new function may also need remangling.
1128 if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
1129 NewFn = *Result;
1130 return true;
1131 }
1132 }
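// Editorial note (illustrative, not part of the original source; the
// intrinsic name is hypothetical): a declaration written against a named
// struct such as
//   %pair = type { i32, i32 }
//   declare %pair @llvm.hypothetical.intrinsic()
// is re-created so that it returns the literal, non-packed { i32, i32 }.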
1133
1134 // Remangle our intrinsic since we upgrade the mangling
1135 auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
1136 if (Result != None) {
1137 NewFn = *Result;
1138 return true;
1139 }
1140
1141 // This may not belong here. This function is effectively being overloaded
1142 // to both detect an intrinsic which needs upgrading, and to provide the
1143 // upgraded form of the intrinsic. We should perhaps have two separate
1144 // functions for this.
1145 return false;
1146}
1147
1148bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
1149 NewFn = nullptr;
1150 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
1151  assert(F != NewFn && "Intrinsic function upgraded to the same function");
1152
1153 // Upgrade intrinsic attributes. This does not change the function.
1154 if (NewFn)
1155 F = NewFn;
1156 if (Intrinsic::ID id = F->getIntrinsicID())
1157 F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
1158 return Upgraded;
1159}
1160
1161GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
1162 if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
1163 GV->getName() == "llvm.global_dtors")) ||
1164 !GV->hasInitializer())
1165 return nullptr;
1166 ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
1167 if (!ATy)
1168 return nullptr;
1169 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
1170 if (!STy || STy->getNumElements() != 2)
1171 return nullptr;
1172
1173 LLVMContext &C = GV->getContext();
1174 IRBuilder<> IRB(C);
1175 auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
1176 IRB.getInt8PtrTy());
1177 Constant *Init = GV->getInitializer();
1178 unsigned N = Init->getNumOperands();
1179 std::vector<Constant *> NewCtors(N);
1180 for (unsigned i = 0; i != N; ++i) {
1181 auto Ctor = cast<Constant>(Init->getOperand(i));
1182 NewCtors[i] = ConstantStruct::get(
1183 EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
1184 Constant::getNullValue(IRB.getInt8PtrTy()));
1185 }
1186 Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
1187
1188 return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
1189 NewInit, GV->getName());
1190}
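// Editorial note (illustrative, not part of the original source): a
// two-field llvm.global_ctors entry such as
//   { i32, void ()* } { i32 65535, void ()* @ctor }
// is rebuilt as the three-field form with a null associated-data pointer:
//   { i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }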
1191
1192// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
1193// to byte shuffles.
1194static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
1195 Value *Op, unsigned Shift) {
1196 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1197 unsigned NumElts = ResultTy->getNumElements() * 8;
1198
1199 // Bitcast from a 64-bit element type to a byte element type.
1200 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1201 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1202
1203 // We'll be shuffling in zeroes.
1204 Value *Res = Constant::getNullValue(VecTy);
1205
1206 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1207 // we'll just return the zero vector.
1208 if (Shift < 16) {
1209 int Idxs[64];
1210 // 256/512-bit version is split into 2/4 16-byte lanes.
1211 for (unsigned l = 0; l != NumElts; l += 16)
1212 for (unsigned i = 0; i != 16; ++i) {
1213 unsigned Idx = NumElts + i - Shift;
1214 if (Idx < NumElts)
1215 Idx -= NumElts - 16; // end of lane, switch operand.
1216 Idxs[l + i] = Idx + l;
1217 }
1218
1219 Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
1220 }
1221
1222 // Bitcast back to a 64-bit element type.
1223 return Builder.CreateBitCast(Res, ResultTy, "cast");
1224}
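// Editorial worked example (not part of the original source): for a 128-bit
// vector, NumElts is 16; with Shift = 4 the loop yields
// Idxs = {12,13,14,15, 16,17,...,27}. Indices below 16 select zero bytes
// from Res, and index 16+i selects byte i of Op, so the result is Op shifted
// left by four bytes with zeroes shifted in, matching PSLLDQ.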
1225
1226// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
1227// to byte shuffles.
1228static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
1229 unsigned Shift) {
1230 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1231 unsigned NumElts = ResultTy->getNumElements() * 8;
1232
1233 // Bitcast from a 64-bit element type to a byte element type.
1234 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1235 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1236
1237 // We'll be shuffling in zeroes.
1238 Value *Res = Constant::getNullValue(VecTy);
1239
1240 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1241 // we'll just return the zero vector.
1242 if (Shift < 16) {
1243 int Idxs[64];
1244 // 256/512-bit version is split into 2/4 16-byte lanes.
1245 for (unsigned l = 0; l != NumElts; l += 16)
1246 for (unsigned i = 0; i != 16; ++i) {
1247 unsigned Idx = i + Shift;
1248 if (Idx >= 16)
1249 Idx += NumElts - 16; // end of lane, switch operand.
1250 Idxs[l + i] = Idx + l;
1251 }
1252
1253 Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
1254 }
1255
1256 // Bitcast back to a 64-bit element type.
1257 return Builder.CreateBitCast(Res, ResultTy, "cast");
1258}
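// Worked index example (illustrative): with NumElts = 32 (a 256-bit vector)
// and Shift = 4, lane 0 takes indices 4..15 from Op, while 16..19 are
// remapped to 32..35, i.e. the zero vector, since Idx >= 16 switches to the
// second shuffle operand. Lane 1 repeats the pattern offset by l = 16, so
// each 16-byte lane shifts right by 4 independently, matching the hardware.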
1259
1260static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
1261 unsigned NumElts) {
1262 assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
1263 llvm::VectorType *MaskTy = FixedVectorType::get(
1264 Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
1265 Mask = Builder.CreateBitCast(Mask, MaskTy);
1266
1267 // If we have fewer than 8 elements (1, 2 or 4), then the starting mask was an
1268 // i8 and we need to extract down to the right number of elements.
1269 if (NumElts <= 4) {
1270 int Indices[4];
1271 for (unsigned i = 0; i != NumElts; ++i)
1272 Indices[i] = i;
1273 Mask = Builder.CreateShuffleVector(
1274 Mask, Mask, makeArrayRef(Indices, NumElts), "extract");
1275 }
1276
1277 return Mask;
1278}
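// Example of the narrow-mask path (a sketch): an i8 mask guarding a
// 4-element vector becomes
//   %bc = bitcast i8 %mask to <8 x i1>
//   %extract = shufflevector <8 x i1> %bc, <8 x i1> %bc,
//              <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// so only the low 4 bits of the original i8 act as the select condition.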
1279
1280static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
1281 Value *Op0, Value *Op1) {
1282 // If the mask is all ones just emit the first operation.
1283 if (const auto *C = dyn_cast<Constant>(Mask))
1284 if (C->isAllOnesValue())
1285 return Op0;
1286
1287 Mask = getX86MaskVec(Builder, Mask,
1288 cast<FixedVectorType>(Op0->getType())->getNumElements());
1289 return Builder.CreateSelect(Mask, Op0, Op1);
1290}
1291
1292static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
1293 Value *Op0, Value *Op1) {
1294 // If the mask is all ones just emit the first operation.
1295 if (const auto *C = dyn_cast<Constant>(Mask))
1296 if (C->isAllOnesValue())
1297 return Op0;
1298
1299 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
1300 Mask->getType()->getIntegerBitWidth());
1301 Mask = Builder.CreateBitCast(Mask, MaskTy);
1302 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
1303 return Builder.CreateSelect(Mask, Op0, Op1);
1304}
1305
1306// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
1307 // PALIGNR handles large immediates by shifting, while VALIGN masks the immediate,
1308 // so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
1309static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
1310 Value *Op1, Value *Shift,
1311 Value *Passthru, Value *Mask,
1312 bool IsVALIGN) {
1313 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
1314
1315 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1316 assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
1317 assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
1318 assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
1319
1320 // Mask the immediate for VALIGN.
1321 if (IsVALIGN)
1322 ShiftVal &= (NumElts - 1);
1323
1324 // If palignr is shifting the pair of vectors more than the size of two
1325 // lanes, emit zero.
1326 if (ShiftVal >= 32)
1327 return llvm::Constant::getNullValue(Op0->getType());
1328
1329 // If palignr is shifting the pair of input vectors more than one lane,
1330 // but less than two lanes, convert to shifting in zeroes.
1331 if (ShiftVal > 16) {
1332 ShiftVal -= 16;
1333 Op1 = Op0;
1334 Op0 = llvm::Constant::getNullValue(Op0->getType());
1335 }
1336
1337 int Indices[64];
1338 // 256-bit palignr operates on 128-bit lanes so we need to handle that
1339 for (unsigned l = 0; l < NumElts; l += 16) {
1340 for (unsigned i = 0; i != 16; ++i) {
1341 unsigned Idx = ShiftVal + i;
1342 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1343 Idx += NumElts - 16; // End of lane, switch operand.
1344 Indices[l + i] = Idx + l;
1345 }
1346 }
1347
1348 Value *Align = Builder.CreateShuffleVector(Op1, Op0,
1349 makeArrayRef(Indices, NumElts),
1350 "palignr");
1351
1352 return EmitX86Select(Builder, Mask, Align, Passthru);
1353}
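// Example of the large-immediate case (illustrative): a 128-bit palignr with
// an immediate of 20 is rewritten as ShiftVal = 4 with Op1 = Op0 and
// Op0 = zeroinitializer, so the shuffle reads the upper input and shifts
// zeroes in from above, exactly as the hardware instruction would.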
1354
1355static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
1356 bool ZeroMask, bool IndexForm) {
1357 Type *Ty = CI.getType();
1358 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1359 unsigned EltWidth = Ty->getScalarSizeInBits();
1360 bool IsFloat = Ty->isFPOrFPVectorTy();
1361 Intrinsic::ID IID;
1362 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1363 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1364 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1365 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1366 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1367 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1368 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1369 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1370 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1371 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1372 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1373 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1374 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1375 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1376 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1377 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1378 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1379 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1380 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1381 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1382 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1383 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1384 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1385 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1386 else if (VecWidth == 128 && EltWidth == 16)
1387 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1388 else if (VecWidth == 256 && EltWidth == 16)
1389 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1390 else if (VecWidth == 512 && EltWidth == 16)
1391 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1392 else if (VecWidth == 128 && EltWidth == 8)
1393 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1394 else if (VecWidth == 256 && EltWidth == 8)
1395 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1396 else if (VecWidth == 512 && EltWidth == 8)
1397 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1398 else
1399 llvm_unreachable("Unexpected intrinsic");
1400
1401 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1402 CI.getArgOperand(2) };
1403
1404 // If this isn't the index form, we need to swap operands 0 and 1.
1405 if (!IndexForm)
1406 std::swap(Args[0], Args[1]);
1407
1408 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1409 Args);
1410 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1411 : Builder.CreateBitCast(CI.getArgOperand(1),
1412 Ty);
1413 return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1414}
1415
1416static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
1417 Intrinsic::ID IID) {
1418 Type *Ty = CI.getType();
1419 Value *Op0 = CI.getOperand(0);
1420 Value *Op1 = CI.getOperand(1);
1421 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1422 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1423
1424 if (CI.arg_size() == 4) { // For masked intrinsics.
1425 Value *VecSrc = CI.getOperand(2);
1426 Value *Mask = CI.getOperand(3);
1427 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1428 }
1429 return Res;
1430}
1431
1432static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
1433 bool IsRotateRight) {
1434 Type *Ty = CI.getType();
1435 Value *Src = CI.getArgOperand(0);
1436 Value *Amt = CI.getArgOperand(1);
1437
1438 // The amount may be a scalar immediate, in which case create a splat vector.
1439 // Funnel shift amounts are treated as modulo and the types are all powers of 2, so
1440 // we only care about the lowest log2 bits anyway.
1441 if (Amt->getType() != Ty) {
1442 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1443 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1444 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1445 }
1446
1447 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1448 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1449 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1450
1451 if (CI.arg_size() == 4) { // For masked intrinsics.
1452 Value *VecSrc = CI.getOperand(2);
1453 Value *Mask = CI.getOperand(3);
1454 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1455 }
1456 return Res;
1457}
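// A rotate is a funnel shift with both inputs equal, so (as a sketch, with a
// made-up value %v) an avx512 prolv-style call upgrades to roughly
//   %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %v, <4 x i32> %v,
//                                        <4 x i32> %amt)
// with an EmitX86Select appended for the 4-argument masked forms.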
1458
1459static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
1460 bool IsSigned) {
1461 Type *Ty = CI.getType();
1462 Value *LHS = CI.getArgOperand(0);
1463 Value *RHS = CI.getArgOperand(1);
1464
1465 CmpInst::Predicate Pred;
1466 switch (Imm) {
1467 case 0x0:
1468 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1469 break;
1470 case 0x1:
1471 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1472 break;
1473 case 0x2:
1474 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1475 break;
1476 case 0x3:
1477 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1478 break;
1479 case 0x4:
1480 Pred = ICmpInst::ICMP_EQ;
1481 break;
1482 case 0x5:
1483 Pred = ICmpInst::ICMP_NE;
1484 break;
1485 case 0x6:
1486 return Constant::getNullValue(Ty); // FALSE
1487 case 0x7:
1488 return Constant::getAllOnesValue(Ty); // TRUE
1489 default:
1490 llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
1491 }
1492
1493 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1494 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1495 return Ext;
1496}
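// Example (a sketch): the signed vpcom with immediate 0x0 on <16 x i8>
// becomes a compare plus a sign-extension back to the vector type:
//   %c = icmp slt <16 x i8> %lhs, %rhs
//   %r = sext <16 x i1> %c to <16 x i8>
// Immediates 0x6 and 0x7 short-circuit to all-zeros and all-ones constants.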
1497
1498static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
1499 bool IsShiftRight, bool ZeroMask) {
1500 Type *Ty = CI.getType();
1501 Value *Op0 = CI.getArgOperand(0);
1502 Value *Op1 = CI.getArgOperand(1);
1503 Value *Amt = CI.getArgOperand(2);
1504
1505 if (IsShiftRight)
1506 std::swap(Op0, Op1);
1507
1508 // The amount may be a scalar immediate, in which case create a splat vector.
1509 // Funnel shift amounts are treated as modulo and the types are all powers of 2, so
1510 // we only care about the lowest log2 bits anyway.
1511 if (Amt->getType() != Ty) {
1512 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1513 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1514 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1515 }
1516
1517 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1518 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1519 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1520
1521 unsigned NumArgs = CI.arg_size();
1522 if (NumArgs >= 4) { // For masked intrinsics.
1523 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1524 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1525 CI.getArgOperand(0);
1526 Value *Mask = CI.getOperand(NumArgs - 1);
1527 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1528 }
1529 return Res;
1530}
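// Passthru selection above, summarized: 5-argument forms carry an explicit
// passthru in argument 3; 4-argument forms fall back to zero for the maskz
// variants or to argument 0 otherwise, and the mask is always taken from the
// last argument.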
1531
1532static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
1533 Value *Ptr, Value *Data, Value *Mask,
1534 bool Aligned) {
1535 // Cast the pointer to the right type.
1536 Ptr = Builder.CreateBitCast(Ptr,
1537 llvm::PointerType::getUnqual(Data->getType()));
1538 const Align Alignment =
1539 Aligned
1540 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedSize() / 8)
1541 : Align(1);
1542
1543 // If the mask is all ones just emit a regular store.
1544 if (const auto *C = dyn_cast<Constant>(Mask))
1545 if (C->isAllOnesValue())
1546 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1547
1548 // Convert the mask from an integer type to a vector of i1.
1549 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1550 Mask = getX86MaskVec(Builder, Mask, NumElts);
1551 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1552}
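// Example (illustrative, pre-opaque-pointer syntax): a storeu with a
// non-constant i8 mask over <4 x i32> lowers to
//   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %data,
//        <4 x i32>* %ptr, i32 1, <4 x i1> %maskvec)
// while an all-ones constant mask becomes a plain unaligned store.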
1553
1554static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
1555 Value *Ptr, Value *Passthru, Value *Mask,
1556 bool Aligned) {
1557 Type *ValTy = Passthru->getType();
1558 // Cast the pointer to the right type.
1559 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1560 const Align Alignment =
1561 Aligned
1562 ? Align(Passthru->getType()->getPrimitiveSizeInBits().getFixedSize() /
1563 8)
1564 : Align(1);
1565
1566 // If the mask is all ones just emit a regular load.
1567 if (const auto *C = dyn_cast<Constant>(Mask))
1568 if (C->isAllOnesValue())
1569 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1570
1571 // Convert the mask from an integer type to a vector of i1.
1572 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1573 Mask = getX86MaskVec(Builder, Mask, NumElts);
1574 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1575}
1576
1577static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
1578 Type *Ty = CI.getType();
1579 Value *Op0 = CI.getArgOperand(0);
1580 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1581 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1582 if (CI.arg_size() == 3)
1583 Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1584 return Res;
1585}
1586
1587static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
1588 Type *Ty = CI.getType();
1589
1590 // Arguments have a vXi32 type so cast to vXi64.
1591 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1592 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1593
1594 if (IsSigned) {
1595 // Shift left then arithmetic shift right.
1596 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1597 LHS = Builder.CreateShl(LHS, ShiftAmt);
1598 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1599 RHS = Builder.CreateShl(RHS, ShiftAmt);
1600 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1601 } else {
1602 // Clear the upper bits.
1603 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1604 LHS = Builder.CreateAnd(LHS, Mask);
1605 RHS = Builder.CreateAnd(RHS, Mask);
1606 }
1607
1608 Value *Res = Builder.CreateMul(LHS, RHS);
1609
1610 if (CI.arg_size() == 4)
1611 Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1612
1613 return Res;
1614}
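// Worked example (illustrative): in the signed form on v2i64, the shl/ashr
// pair sign-extends the low 32 bits of each lane, e.g. 0x00000000FFFFFFFE
// becomes 0xFFFFFFFFFFFFFFFE (-2), so the full 64-bit multiply reproduces
// pmuldq's signed 32x32->64 product; the unsigned form only needs the
// 0xFFFFFFFF mask.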
1615
1616 // Apply a mask to a vector of i1s and make sure the result is at least 8 bits wide.
1617static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1618 Value *Mask) {
1619 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1620 if (Mask) {
1621 const auto *C = dyn_cast<Constant>(Mask);
1622 if (!C || !C->isAllOnesValue())
1623 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1624 }
1625
1626 if (NumElts < 8) {
1627 int Indices[8];
1628 for (unsigned i = 0; i != NumElts; ++i)
1629 Indices[i] = i;
1630 for (unsigned i = NumElts; i != 8; ++i)
1631 Indices[i] = NumElts + i % NumElts;
1632 Vec = Builder.CreateShuffleVector(Vec,
1633 Constant::getNullValue(Vec->getType()),
1634 Indices);
1635 }
1636 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1637}
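// Example of the widening shuffle (a sketch): a <4 x i1> compare result is
// padded with four lanes of the zero vector,
//   %w = shufflevector <4 x i1> %v, <4 x i1> zeroinitializer,
//        <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
//   %m = bitcast <8 x i1> %w to i8
// so the resulting scalar mask is never narrower than i8.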
1638
1639static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
1640 unsigned CC, bool Signed) {
1641 Value *Op0 = CI.getArgOperand(0);
1642 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1643
1644 Value *Cmp;
1645 if (CC == 3) {
1646 Cmp = Constant::getNullValue(
1647 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1648 } else if (CC == 7) {
1649 Cmp = Constant::getAllOnesValue(
1650 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1651 } else {
1652 ICmpInst::Predicate Pred;
1653 switch (CC) {
1654 default: llvm_unreachable("Unknown condition code");
1655 case 0: Pred = ICmpInst::ICMP_EQ; break;
1656 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1657 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1658 case 4: Pred = ICmpInst::ICMP_NE; break;
1659 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1660 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1661 }
1662 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1663 }
1664
1665 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1666
1667 return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
1668}
1669
1670// Replace a masked intrinsic with an older unmasked intrinsic.
1671static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
1672 Intrinsic::ID IID) {
1673 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1674 Value *Rep = Builder.CreateCall(Intrin,
1675 { CI.getArgOperand(0), CI.getArgOperand(1) });
1676 return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1677}
1678
1679static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
1680 Value* A = CI.getArgOperand(0);
1681 Value* B = CI.getArgOperand(1);
1682 Value* Src = CI.getArgOperand(2);
1683 Value* Mask = CI.getArgOperand(3);
1684
1685 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1686 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1687 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1688 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1689 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1690 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1691}
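// Illustrative effect for the masked scalar moves: bit 0 of the i8 mask
// picks between element 0 of B and element 0 of Src, and the chosen scalar
// is inserted into lane 0 of A while A's upper lanes pass through unchanged.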
1692
1693
1694static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
1695 Value* Op = CI.getArgOperand(0);
1696 Type* ReturnOp = CI.getType();
1697 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1698 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1699 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1700}
1701
1702// Replace intrinsic with unmasked version and a select.
1703static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1704 CallBase &CI, Value *&Rep) {
1705 Name = Name.substr(12); // Remove avx512.mask.
1706
1707 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1708 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1709 Intrinsic::ID IID;
1710 if (Name.startswith("max.p")) {
1711 if (VecWidth == 128 && EltWidth == 32)
1712 IID = Intrinsic::x86_sse_max_ps;
1713 else if (VecWidth == 128 && EltWidth == 64)
1714 IID = Intrinsic::x86_sse2_max_pd;
1715 else if (VecWidth == 256 && EltWidth == 32)
1716 IID = Intrinsic::x86_avx_max_ps_256;
1717 else if (VecWidth == 256 && EltWidth == 64)
1718 IID = Intrinsic::x86_avx_max_pd_256;
1719 else
1720 llvm_unreachable("Unexpected intrinsic");
1721 } else if (Name.startswith("min.p")) {
1722 if (VecWidth == 128 && EltWidth == 32)
1723 IID = Intrinsic::x86_sse_min_ps;
1724 else if (VecWidth == 128 && EltWidth == 64)
1725 IID = Intrinsic::x86_sse2_min_pd;
1726 else if (VecWidth == 256 && EltWidth == 32)
1727 IID = Intrinsic::x86_avx_min_ps_256;
1728 else if (VecWidth == 256 && EltWidth == 64)
1729 IID = Intrinsic::x86_avx_min_pd_256;
1730 else
1731 llvm_unreachable("Unexpected intrinsic");
1732 } else if (Name.startswith("pshuf.b.")) {
1733 if (VecWidth == 128)
1734 IID = Intrinsic::x86_ssse3_pshuf_b_128;
1735 else if (VecWidth == 256)
1736 IID = Intrinsic::x86_avx2_pshuf_b;
1737 else if (VecWidth == 512)
1738 IID = Intrinsic::x86_avx512_pshuf_b_512;
1739 else
1740 llvm_unreachable("Unexpected intrinsic");
1741 } else if (Name.startswith("pmul.hr.sw.")) {
1742 if (VecWidth == 128)
1743 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
1744 else if (VecWidth == 256)
1745 IID = Intrinsic::x86_avx2_pmul_hr_sw;
1746 else if (VecWidth == 512)
1747 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
1748 else
1749 llvm_unreachable("Unexpected intrinsic");
1750 } else if (Name.startswith("pmulh.w.")) {
1751 if (VecWidth == 128)
1752 IID = Intrinsic::x86_sse2_pmulh_w;
1753 else if (VecWidth == 256)
1754 IID = Intrinsic::x86_avx2_pmulh_w;
1755 else if (VecWidth == 512)
1756 IID = Intrinsic::x86_avx512_pmulh_w_512;
1757 else
1758 llvm_unreachable("Unexpected intrinsic");
1759 } else if (Name.startswith("pmulhu.w.")) {
1760 if (VecWidth == 128)
1761 IID = Intrinsic::x86_sse2_pmulhu_w;
1762 else if (VecWidth == 256)
1763 IID = Intrinsic::x86_avx2_pmulhu_w;
1764 else if (VecWidth == 512)
1765 IID = Intrinsic::x86_avx512_pmulhu_w_512;
1766 else
1767 llvm_unreachable("Unexpected intrinsic");
1768 } else if (Name.startswith("pmaddw.d.")) {
1769 if (VecWidth == 128)
1770 IID = Intrinsic::x86_sse2_pmadd_wd;
1771 else if (VecWidth == 256)
1772 IID = Intrinsic::x86_avx2_pmadd_wd;
1773 else if (VecWidth == 512)
1774 IID = Intrinsic::x86_avx512_pmaddw_d_512;
1775 else
1776 llvm_unreachable("Unexpected intrinsic");
1777 } else if (Name.startswith("pmaddubs.w.")) {
1778 if (VecWidth == 128)
1779 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
1780 else if (VecWidth == 256)
1781 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
1782 else if (VecWidth == 512)
1783 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
1784 else
1785 llvm_unreachable("Unexpected intrinsic");
1786 } else if (Name.startswith("packsswb.")) {
1787 if (VecWidth == 128)
1788 IID = Intrinsic::x86_sse2_packsswb_128;
1789 else if (VecWidth == 256)
1790 IID = Intrinsic::x86_avx2_packsswb;
1791 else if (VecWidth == 512)
1792 IID = Intrinsic::x86_avx512_packsswb_512;
1793 else
1794 llvm_unreachable("Unexpected intrinsic");
1795 } else if (Name.startswith("packssdw.")) {
1796 if (VecWidth == 128)
1797 IID = Intrinsic::x86_sse2_packssdw_128;
1798 else if (VecWidth == 256)
1799 IID = Intrinsic::x86_avx2_packssdw;
1800 else if (VecWidth == 512)
1801 IID = Intrinsic::x86_avx512_packssdw_512;
1802 else
1803 llvm_unreachable("Unexpected intrinsic");
1804 } else if (Name.startswith("packuswb.")) {
1805 if (VecWidth == 128)
1806 IID = Intrinsic::x86_sse2_packuswb_128;
1807 else if (VecWidth == 256)
1808 IID = Intrinsic::x86_avx2_packuswb;
1809 else if (VecWidth == 512)
1810 IID = Intrinsic::x86_avx512_packuswb_512;
1811 else
1812 llvm_unreachable("Unexpected intrinsic");
1813 } else if (Name.startswith("packusdw.")) {
1814 if (VecWidth == 128)
1815 IID = Intrinsic::x86_sse41_packusdw;
1816 else if (VecWidth == 256)
1817 IID = Intrinsic::x86_avx2_packusdw;
1818 else if (VecWidth == 512)
1819 IID = Intrinsic::x86_avx512_packusdw_512;
1820 else
1821 llvm_unreachable("Unexpected intrinsic");
1822 } else if (Name.startswith("vpermilvar.")) {
1823 if (VecWidth == 128 && EltWidth == 32)
1824 IID = Intrinsic::x86_avx_vpermilvar_ps;
1825 else if (VecWidth == 128 && EltWidth == 64)
1826 IID = Intrinsic::x86_avx_vpermilvar_pd;
1827 else if (VecWidth == 256 && EltWidth == 32)
1828 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
1829 else if (VecWidth == 256 && EltWidth == 64)
1830 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
1831 else if (VecWidth == 512 && EltWidth == 32)
1832 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
1833 else if (VecWidth == 512 && EltWidth == 64)
1834 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
1835 else
1836 llvm_unreachable("Unexpected intrinsic");
1837 } else if (Name == "cvtpd2dq.256") {
1838 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
1839 } else if (Name == "cvtpd2ps.256") {
1840 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
1841 } else if (Name == "cvttpd2dq.256") {
1842 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
1843 } else if (Name == "cvttps2dq.128") {
1844 IID = Intrinsic::x86_sse2_cvttps2dq;
1845 } else if (Name == "cvttps2dq.256") {
1846 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
1847 } else if (Name.startswith("permvar.")) {
1848 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
1849 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1850 IID = Intrinsic::x86_avx2_permps;
1851 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1852 IID = Intrinsic::x86_avx2_permd;
1853 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1854 IID = Intrinsic::x86_avx512_permvar_df_256;
1855 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1856 IID = Intrinsic::x86_avx512_permvar_di_256;
1857 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1858 IID = Intrinsic::x86_avx512_permvar_sf_512;
1859 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1860 IID = Intrinsic::x86_avx512_permvar_si_512;
1861 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1862 IID = Intrinsic::x86_avx512_permvar_df_512;
1863 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1864 IID = Intrinsic::x86_avx512_permvar_di_512;
1865 else if (VecWidth == 128 && EltWidth == 16)
1866 IID = Intrinsic::x86_avx512_permvar_hi_128;
1867 else if (VecWidth == 256 && EltWidth == 16)
1868 IID = Intrinsic::x86_avx512_permvar_hi_256;
1869 else if (VecWidth == 512 && EltWidth == 16)
1870 IID = Intrinsic::x86_avx512_permvar_hi_512;
1871 else if (VecWidth == 128 && EltWidth == 8)
1872 IID = Intrinsic::x86_avx512_permvar_qi_128;
1873 else if (VecWidth == 256 && EltWidth == 8)
1874 IID = Intrinsic::x86_avx512_permvar_qi_256;
1875 else if (VecWidth == 512 && EltWidth == 8)
1876 IID = Intrinsic::x86_avx512_permvar_qi_512;
1877 else
1878 llvm_unreachable("Unexpected intrinsic");
1879 } else if (Name.startswith("dbpsadbw.")) {
1880 if (VecWidth == 128)
1881 IID = Intrinsic::x86_avx512_dbpsadbw_128;
1882 else if (VecWidth == 256)
1883 IID = Intrinsic::x86_avx512_dbpsadbw_256;
1884 else if (VecWidth == 512)
1885 IID = Intrinsic::x86_avx512_dbpsadbw_512;
1886 else
1887 llvm_unreachable("Unexpected intrinsic");
1888 } else if (Name.startswith("pmultishift.qb.")) {
1889 if (VecWidth == 128)
1890 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
1891 else if (VecWidth == 256)
1892 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
1893 else if (VecWidth == 512)
1894 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
1895 else
1896 llvm_unreachable("Unexpected intrinsic");
1897 } else if (Name.startswith("conflict.")) {
1898 if (Name[9] == 'd' && VecWidth == 128)
1899 IID = Intrinsic::x86_avx512_conflict_d_128;
1900 else if (Name[9] == 'd' && VecWidth == 256)
1901 IID = Intrinsic::x86_avx512_conflict_d_256;
1902 else if (Name[9] == 'd' && VecWidth == 512)
1903 IID = Intrinsic::x86_avx512_conflict_d_512;
1904 else if (Name[9] == 'q' && VecWidth == 128)
1905 IID = Intrinsic::x86_avx512_conflict_q_128;
1906 else if (Name[9] == 'q' && VecWidth == 256)
1907 IID = Intrinsic::x86_avx512_conflict_q_256;
1908 else if (Name[9] == 'q' && VecWidth == 512)
1909 IID = Intrinsic::x86_avx512_conflict_q_512;
1910 else
1911 llvm_unreachable("Unexpected intrinsic");
1912 } else if (Name.startswith("pavg.")) {
1913 if (Name[5] == 'b' && VecWidth == 128)
1914 IID = Intrinsic::x86_sse2_pavg_b;
1915 else if (Name[5] == 'b' && VecWidth == 256)
1916 IID = Intrinsic::x86_avx2_pavg_b;
1917 else if (Name[5] == 'b' && VecWidth == 512)
1918 IID = Intrinsic::x86_avx512_pavg_b_512;
1919 else if (Name[5] == 'w' && VecWidth == 128)
1920 IID = Intrinsic::x86_sse2_pavg_w;
1921 else if (Name[5] == 'w' && VecWidth == 256)
1922 IID = Intrinsic::x86_avx2_pavg_w;
1923 else if (Name[5] == 'w' && VecWidth == 512)
1924 IID = Intrinsic::x86_avx512_pavg_w_512;
1925 else
1926 llvm_unreachable("Unexpected intrinsic");
1927 } else
1928 return false;
1929
1930 SmallVector<Value *, 4> Args(CI.args());
1931 Args.pop_back();
1932 Args.pop_back();
1933 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1934 Args);
1935 unsigned NumArgs = CI.arg_size();
1936 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
1937 CI.getArgOperand(NumArgs - 2));
1938 return true;
1939}
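// Example (a sketch): "llvm.x86.avx512.mask.pshuf.b.512" resolves to the
// unmasked x86_avx512_pshuf_b_512 above; the two trailing passthru/mask
// arguments are popped before the call, then EmitX86Select re-applies them
// as a vector select over the unmasked result.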
1940
1941 /// Upgrade the comment in a call to inline asm that represents an ObjC
1942 /// retain/release marker.
1943void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
1944 size_t Pos;
1945 if (AsmStr->find("mov\tfp") == 0 &&
1946 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
1947 (Pos = AsmStr->find("# marker")) != std::string::npos) {
1948 AsmStr->replace(Pos, 1, ";");
1949 }
1950}
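// Example (illustrative string, not taken from a real module): an asm string
//   "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue"
// has the '#' at the matched position replaced with ';', turning the
// old-style comment marker into the current one.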
1951
1952static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
1953 IRBuilder<> &Builder) {
1954 if (Name == "mve.vctp64.old") {
1955 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
1956 // correct type.
1957 Value *VCTP = Builder.CreateCall(
1958 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
1959 CI->getArgOperand(0), CI->getName());
1960 Value *C1 = Builder.CreateCall(
1961 Intrinsic::getDeclaration(
1962 F->getParent(), Intrinsic::arm_mve_pred_v2i,
1963 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
1964 VCTP);
1965 return Builder.CreateCall(
1966 Intrinsic::getDeclaration(
1967 F->getParent(), Intrinsic::arm_mve_pred_i2v,
1968 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
1969 C1);
1970 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
1971 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
1972 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
1973 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
1974 Name == "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
1975 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
1976 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
1977 Name == "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
1978 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
1979 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
1980 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
1981 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
1982 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
1983 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
1984 std::vector<Type *> Tys;
1985 unsigned ID = CI->getIntrinsicID();
1986 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
1987 switch (ID) {
1988 case Intrinsic::arm_mve_mull_int_predicated:
1989 case Intrinsic::arm_mve_vqdmull_predicated:
1990 case Intrinsic::arm_mve_vldr_gather_base_predicated:
1991 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
1992 break;
1993 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
1994 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
1995 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
1996 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
1997 V2I1Ty};
1998 break;
1999 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
2000 Tys = {CI->getType(), CI->getOperand(0)->getType(),
2001 CI->getOperand(1)->getType(), V2I1Ty};
2002 break;
2003 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
2004 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
2005 CI->getOperand(2)->getType(), V2I1Ty};
2006 break;
2007 case Intrinsic::arm_cde_vcx1q_predicated:
2008 case Intrinsic::arm_cde_vcx1qa_predicated:
2009 case Intrinsic::arm_cde_vcx2q_predicated:
2010 case Intrinsic::arm_cde_vcx2qa_predicated:
2011 case Intrinsic::arm_cde_vcx3q_predicated:
2012 case Intrinsic::arm_cde_vcx3qa_predicated:
2013 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
2014 break;
2015 default:
2016 llvm_unreachable("Unhandled Intrinsic!");
2017 }
2018
2019 std::vector<Value *> Ops;
2020 for (Value *Op : CI->args()) {
2021 Type *Ty = Op->getType();
2022 if (Ty->getScalarSizeInBits() == 1) {
2023 Value *C1 = Builder.CreateCall(
2024 Intrinsic::getDeclaration(
2025 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2026 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2027 Op);
2028 Op = Builder.CreateCall(
2029 Intrinsic::getDeclaration(F->getParent(),
2030 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
2031 C1);
2032 }
2033 Ops.push_back(Op);
2034 }
2035
2036 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
2037 return Builder.CreateCall(Fn, Ops, CI->getName());
2038 }
2039 llvm_unreachable("Unknown function for ARM CallBase upgrade.");
2040}
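// Example of the vctp64 path (a sketch; the mangled names are assumptions):
//   %p = call <4 x i1> @llvm.arm.mve.vctp64.old(i32 %n)
// becomes a v2i1 vctp round-tripped through the predicate-register form:
//   %v = call <2 x i1> @llvm.arm.mve.vctp64(i32 %n)
//   %i = call i32 @llvm.arm.mve.pred.v2i.v2i1(<2 x i1> %v)
//   %p = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %i)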
2041
2042/// Upgrade a call to an old intrinsic. All argument and return casting must be
2043 /// provided to integrate seamlessly with the existing context.
2044void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
2045 Function *F = CI->getCalledFunction();
2046 LLVMContext &C = CI->getContext();
2047 IRBuilder<> Builder(C);
2048 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
2049
2050 assert(F && "Intrinsic call is not direct?");
1: '?' condition is true
2051
2052 if (!NewFn) {
2: Assuming 'NewFn' is non-null
3: Taking false branch
2053 // Get the Function's name.
2054 StringRef Name = F->getName();
2055
2056 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
2057 Name = Name.substr(5);
2058
2059 bool IsX86 = Name.startswith("x86.");
2060 if (IsX86)
2061 Name = Name.substr(4);
2062 bool IsNVVM = Name.startswith("nvvm.");
2063 if (IsNVVM)
2064 Name = Name.substr(5);
2065 bool IsARM = Name.startswith("arm.");
2066 if (IsARM)
2067 Name = Name.substr(4);
2068
2069 if (IsX86 && Name.startswith("sse4a.movnt.")) {
2070 Module *M = F->getParent();
2071 SmallVector<Metadata *, 1> Elts;
2072 Elts.push_back(
2073 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2074 MDNode *Node = MDNode::get(C, Elts);
2075
2076 Value *Arg0 = CI->getArgOperand(0);
2077 Value *Arg1 = CI->getArgOperand(1);
2078
2079 // Nontemporal (unaligned) store of the 0'th element of the float/double
2080 // vector.
2081 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
2082 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
2083 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
2084 Value *Extract =
2085 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
2086
2087 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
2088 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2089
2090 // Remove intrinsic.
2091 CI->eraseFromParent();
2092 return;
2093 }
2094
2095 if (IsX86 && (Name.startswith("avx.movnt.") ||
2096 Name.startswith("avx512.storent."))) {
2097 Module *M = F->getParent();
2098 SmallVector<Metadata *, 1> Elts;
2099 Elts.push_back(
2100 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2101 MDNode *Node = MDNode::get(C, Elts);
2102
2103 Value *Arg0 = CI->getArgOperand(0);
2104 Value *Arg1 = CI->getArgOperand(1);
2105
2106 // Convert the type of the pointer to a pointer to the stored type.
2107 Value *BC = Builder.CreateBitCast(Arg0,
2108 PointerType::getUnqual(Arg1->getType()),
2109 "cast");
2110 StoreInst *SI = Builder.CreateAlignedStore(
2111 Arg1, BC,
2112 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
2113 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2114
2115 // Remove intrinsic.
2116 CI->eraseFromParent();
2117 return;
2118 }
2119
2120 if (IsX86 && Name == "sse2.storel.dq") {
2121 Value *Arg0 = CI->getArgOperand(0);
2122 Value *Arg1 = CI->getArgOperand(1);
2123
2124 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2125 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2126 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2127 Value *BC = Builder.CreateBitCast(Arg0,
2128 PointerType::getUnqual(Elt->getType()),
2129 "cast");
2130 Builder.CreateAlignedStore(Elt, BC, Align(1));
2131
2132 // Remove intrinsic.
2133 CI->eraseFromParent();
2134 return;
2135 }
2136
2137 if (IsX86 && (Name.startswith("sse.storeu.") ||
2138 Name.startswith("sse2.storeu.") ||
2139 Name.startswith("avx.storeu."))) {
2140 Value *Arg0 = CI->getArgOperand(0);
2141 Value *Arg1 = CI->getArgOperand(1);
2142
2143 Arg0 = Builder.CreateBitCast(Arg0,
2144 PointerType::getUnqual(Arg1->getType()),
2145 "cast");
2146 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2147
2148 // Remove intrinsic.
2149 CI->eraseFromParent();
2150 return;
2151 }
2152
2153 if (IsX86 && Name == "avx512.mask.store.ss") {
2154 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2155 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2156 Mask, false);
2157
2158 // Remove intrinsic.
2159 CI->eraseFromParent();
2160 return;
2161 }
2162
2163 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
2164 // "avx512.mask.storeu." or "avx512.mask.store."
2165 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2166 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2167 CI->getArgOperand(2), Aligned);
2168
2169 // Remove intrinsic.
2170 CI->eraseFromParent();
2171 return;
2172 }
2173
2174 Value *Rep;
2175 // Upgrade packed integer vector compare intrinsics to compare instructions.
2176 if (IsX86 && (Name.startswith("sse2.pcmp") ||
2177 Name.startswith("avx2.pcmp"))) {
2178 // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
2179 bool CmpEq = Name[9] == 'e';
2180 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2181 CI->getArgOperand(0), CI->getArgOperand(1));
2182 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2183 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
2184 Type *ExtTy = Type::getInt32Ty(C);
2185 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2186 ExtTy = Type::getInt64Ty(C);
2187 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2188 ExtTy->getPrimitiveSizeInBits();
2189 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2190 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2191 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2192 Name == "sse2.sqrt.sd")) {
2193 Value *Vec = CI->getArgOperand(0);
2194 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2195 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2196 Intrinsic::sqrt, Elt0->getType());
2197 Elt0 = Builder.CreateCall(Intr, Elt0);
2198 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2199 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
2200 Name.startswith("sse2.sqrt.p") ||
2201 Name.startswith("sse.sqrt.p"))) {
2202 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2203 Intrinsic::sqrt,
2204 CI->getType()),
2205 {CI->getArgOperand(0)});
2206 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
2207 if (CI->arg_size() == 4 &&
2208 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2209 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2210 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2211 : Intrinsic::x86_avx512_sqrt_pd_512;
2212
2213 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2214 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
2215 IID), Args);
2216 } else {
2217 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2218 Intrinsic::sqrt,
2219 CI->getType()),
2220 {CI->getArgOperand(0)});
2221 }
2222 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2223 CI->getArgOperand(1));
2224 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
2225 Name.startswith("avx512.ptestnm"))) {
2226 Value *Op0 = CI->getArgOperand(0);
2227 Value *Op1 = CI->getArgOperand(1);
2228 Value *Mask = CI->getArgOperand(2);
2229 Rep = Builder.CreateAnd(Op0, Op1);
2230 llvm::Type *Ty = Op0->getType();
2231 Value *Zero = llvm::Constant::getNullValue(Ty);
2232 ICmpInst::Predicate Pred =
2233 Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2234 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2235 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
2236 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
2237 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2238 ->getNumElements();
2239 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2240 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2241 CI->getArgOperand(1));
2242 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
2243 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2244 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2245 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2246 int Indices[64];
2247 for (unsigned i = 0; i != NumElts; ++i)
2248 Indices[i] = i;
2249
2250 // First extract half of each vector. This gives better codegen than
2251 // doing it in a single shuffle.
2252 LHS = Builder.CreateShuffleVector(LHS, LHS,
2253 makeArrayRef(Indices, NumElts / 2));
2254 RHS = Builder.CreateShuffleVector(RHS, RHS,
2255 makeArrayRef(Indices, NumElts / 2));
2256 // Concat the vectors.
2257 // NOTE: Operands have to be swapped to match intrinsic definition.
2258 Rep = Builder.CreateShuffleVector(RHS, LHS,
2259 makeArrayRef(Indices, NumElts));
2260 Rep = Builder.CreateBitCast(Rep, CI->getType());
2261 } else if (IsX86 && Name == "avx512.kand.w") {
2262 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2263 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2264 Rep = Builder.CreateAnd(LHS, RHS);
2265 Rep = Builder.CreateBitCast(Rep, CI->getType());
2266 } else if (IsX86 && Name == "avx512.kandn.w") {
2267 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2268 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2269 LHS = Builder.CreateNot(LHS);
2270 Rep = Builder.CreateAnd(LHS, RHS);
2271 Rep = Builder.CreateBitCast(Rep, CI->getType());
2272 } else if (IsX86 && Name == "avx512.kor.w") {
2273 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2274 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2275 Rep = Builder.CreateOr(LHS, RHS);
2276 Rep = Builder.CreateBitCast(Rep, CI->getType());
2277 } else if (IsX86 && Name == "avx512.kxor.w") {
2278 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2279 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2280 Rep = Builder.CreateXor(LHS, RHS);
2281 Rep = Builder.CreateBitCast(Rep, CI->getType());
2282 } else if (IsX86 && Name == "avx512.kxnor.w") {
2283 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2284 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2285 LHS = Builder.CreateNot(LHS);
2286 Rep = Builder.CreateXor(LHS, RHS);
2287 Rep = Builder.CreateBitCast(Rep, CI->getType());
2288 } else if (IsX86 && Name == "avx512.knot.w") {
2289 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2290 Rep = Builder.CreateNot(Rep);
2291 Rep = Builder.CreateBitCast(Rep, CI->getType());
2292 } else if (IsX86 &&
2293 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2294 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2295 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2296 Rep = Builder.CreateOr(LHS, RHS);
2297 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2298 Value *C;
2299 if (Name[14] == 'c')
2300 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2301 else
2302 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2303 Rep = Builder.CreateICmpEQ(Rep, C);
2304 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2305 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2306 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2307 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2308 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2309 Type *I32Ty = Type::getInt32Ty(C);
2310 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2311 ConstantInt::get(I32Ty, 0));
2312 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2313 ConstantInt::get(I32Ty, 0));
2314 Value *EltOp;
2315 if (Name.contains(".add."))
2316 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2317 else if (Name.contains(".sub."))
2318 EltOp = Builder.CreateFSub(Elt0, Elt1);
2319 else if (Name.contains(".mul."))
2320 EltOp = Builder.CreateFMul(Elt0, Elt1);
2321 else
2322 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2323 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2324 ConstantInt::get(I32Ty, 0));
2325 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
2326 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2327 bool CmpEq = Name[16] == 'e';
2328 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2329 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
2330 Type *OpTy = CI->getArgOperand(0)->getType();
2331 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2332 Intrinsic::ID IID;
2333 switch (VecWidth) {
2334 default: llvm_unreachable("Unexpected intrinsic");
2335 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2336 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2337 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2338 }
2339
2340 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2341 { CI->getOperand(0), CI->getArgOperand(1) });
2342 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2343 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
2344 Type *OpTy = CI->getArgOperand(0)->getType();
2345 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2346 unsigned EltWidth = OpTy->getScalarSizeInBits();
2347 Intrinsic::ID IID;
2348 if (VecWidth == 128 && EltWidth == 32)
2349 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2350 else if (VecWidth == 256 && EltWidth == 32)
2351 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2352 else if (VecWidth == 512 && EltWidth == 32)
2353 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2354 else if (VecWidth == 128 && EltWidth == 64)
2355 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2356 else if (VecWidth == 256 && EltWidth == 64)
2357 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2358 else if (VecWidth == 512 && EltWidth == 64)
2359 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2360 else
2361 llvm_unreachable("Unexpected intrinsic");
2362
2363 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2364 { CI->getOperand(0), CI->getArgOperand(1) });
2365 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2366 } else if (IsX86 && Name.startswith("avx512.cmp.p")) {
2367 SmallVector<Value *, 4> Args(CI->args());
2368 Type *OpTy = Args[0]->getType();
2369 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2370 unsigned EltWidth = OpTy->getScalarSizeInBits();
2371 Intrinsic::ID IID;
2372 if (VecWidth == 128 && EltWidth == 32)
2373 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2374 else if (VecWidth == 256 && EltWidth == 32)
2375 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2376 else if (VecWidth == 512 && EltWidth == 32)
2377 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2378 else if (VecWidth == 128 && EltWidth == 64)
2379 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2380 else if (VecWidth == 256 && EltWidth == 64)
2381 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2382 else if (VecWidth == 512 && EltWidth == 64)
2383 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2384 else
2385 llvm_unreachable("Unexpected intrinsic");
2386
2387 Value *Mask = Constant::getAllOnesValue(CI->getType());
2388 if (VecWidth == 512)
2389 std::swap(Mask, Args.back());
2390 Args.push_back(Mask);
2391
2392 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2393 Args);
2394 } else if (IsX86 && Name.startswith("avx512.mask.cmp.")) {
2395 // Integer compare intrinsics.
2396 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2397 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2398 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
2399 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2400 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2401 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
2402 Name.startswith("avx512.cvtw2mask.") ||
2403 Name.startswith("avx512.cvtd2mask.") ||
2404 Name.startswith("avx512.cvtq2mask."))) {
2405 Value *Op = CI->getArgOperand(0);
2406 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2407 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2408 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2409 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2410 Name == "ssse3.pabs.w.128" ||
2411 Name == "ssse3.pabs.d.128" ||
2412 Name.startswith("avx2.pabs") ||
2413 Name.startswith("avx512.mask.pabs"))) {
2414 Rep = upgradeAbs(Builder, *CI);
2415 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2416 Name == "sse2.pmaxs.w" ||
2417 Name == "sse41.pmaxsd" ||
2418 Name.startswith("avx2.pmaxs") ||
2419 Name.startswith("avx512.mask.pmaxs"))) {
2420 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2421 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2422 Name == "sse41.pmaxuw" ||
2423 Name == "sse41.pmaxud" ||
2424 Name.startswith("avx2.pmaxu") ||
2425 Name.startswith("avx512.mask.pmaxu"))) {
2426 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2427 } else if (IsX86 && (Name == "sse41.pminsb" ||
2428 Name == "sse2.pmins.w" ||
2429 Name == "sse41.pminsd" ||
2430 Name.startswith("avx2.pmins") ||
2431 Name.startswith("avx512.mask.pmins"))) {
2432 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2433 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2434 Name == "sse41.pminuw" ||
2435 Name == "sse41.pminud" ||
2436 Name.startswith("avx2.pminu") ||
2437 Name.startswith("avx512.mask.pminu"))) {
2438 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2439 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2440 Name == "avx2.pmulu.dq" ||
2441 Name == "avx512.pmulu.dq.512" ||
2442 Name.startswith("avx512.mask.pmulu.dq."))) {
2443 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2444 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2445 Name == "avx2.pmul.dq" ||
2446 Name == "avx512.pmul.dq.512" ||
2447 Name.startswith("avx512.mask.pmul.dq."))) {
2448 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2449 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2450 Name == "sse2.cvtsi2sd" ||
2451 Name == "sse.cvtsi642ss" ||
2452 Name == "sse2.cvtsi642sd")) {
2453 Rep = Builder.CreateSIToFP(
2454 CI->getArgOperand(1),
2455 cast<VectorType>(CI->getType())->getElementType());
2456 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2457 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2458 Rep = Builder.CreateUIToFP(
2459 CI->getArgOperand(1),
2460 cast<VectorType>(CI->getType())->getElementType());
2461 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2462 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2463 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2464 Rep = Builder.CreateFPExt(
2465 Rep, cast<VectorType>(CI->getType())->getElementType());
2466 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2467 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2468 Name == "sse2.cvtdq2ps" ||
2469 Name == "avx.cvtdq2.pd.256" ||
2470 Name == "avx.cvtdq2.ps.256" ||
2471 Name.startswith("avx512.mask.cvtdq2pd.") ||
2472 Name.startswith("avx512.mask.cvtudq2pd.") ||
2473 Name.startswith("avx512.mask.cvtdq2ps.") ||
2474 Name.startswith("avx512.mask.cvtudq2ps.") ||
2475 Name.startswith("avx512.mask.cvtqq2pd.") ||
2476 Name.startswith("avx512.mask.cvtuqq2pd.") ||
2477 Name == "avx512.mask.cvtqq2ps.256" ||
2478 Name == "avx512.mask.cvtqq2ps.512" ||
2479 Name == "avx512.mask.cvtuqq2ps.256" ||
2480 Name == "avx512.mask.cvtuqq2ps.512" ||
2481 Name == "sse2.cvtps2pd" ||
2482 Name == "avx.cvt.ps2.pd.256" ||
2483 Name == "avx512.mask.cvtps2pd.128" ||
2484 Name == "avx512.mask.cvtps2pd.256")) {
2485 auto *DstTy = cast<FixedVectorType>(CI->getType());
2486 Rep = CI->getArgOperand(0);
2487 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2488
2489 unsigned NumDstElts = DstTy->getNumElements();
2490 if (NumDstElts < SrcTy->getNumElements()) {
2491 assert(NumDstElts == 2 && "Unexpected vector size");
2492 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2493 }
2494
2495 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2496 bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
2497 if (IsPS2PD)
2498 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2499 else if (CI->arg_size() == 4 &&
2500 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2501 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2502 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2503 : Intrinsic::x86_avx512_sitofp_round;
2504 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2505 { DstTy, SrcTy });
2506 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2507 } else {
2508 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2509 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2510 }
2511
2512 if (CI->arg_size() >= 3)
2513 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2514 CI->getArgOperand(1));
2515 } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
2516 Name.startswith("vcvtph2ps."))) {
2517 auto *DstTy = cast<FixedVectorType>(CI->getType());
2518 Rep = CI->getArgOperand(0);
2519 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2520 unsigned NumDstElts = DstTy->getNumElements();
2521 if (NumDstElts != SrcTy->getNumElements()) {
2522 assert(NumDstElts == 4 && "Unexpected vector size");
2523 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2524 }
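// The source lanes are IEEE half values stored as i16; reinterpret them as
// <N x half> so the fpext below widens each element to float.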
2525 Rep = Builder.CreateBitCast(
2526 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2527 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2528 if (CI->arg_size() >= 3)
2529 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2530 CI->getArgOperand(1));
2531 } else if (IsX86 && Name.startswith("avx512.mask.load")) {
2532 // "avx512.mask.loadu." or "avx512.mask.load."
2533 bool Aligned = Name[16] != 'u'; // Index 16 is 'u' only in "avx512.mask.loadu".
2534 Rep =
2535 UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2536 CI->getArgOperand(2), Aligned);
2537 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
2538 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2539 Type *PtrTy = ResultTy->getElementType();
2540
2541 // Cast the pointer to element type.
2542 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2543 llvm::PointerType::getUnqual(PtrTy));
2544
2545 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2546 ResultTy->getNumElements());
2547
2548 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2549 Intrinsic::masked_expandload,
2550 ResultTy);
2551 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2552 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
2553 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2554 Type *PtrTy = ResultTy->getElementType();
2555
2556 // Cast the pointer to element type.
2557 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2558 llvm::PointerType::getUnqual(PtrTy));
2559
2560 Value *MaskVec =
2561 getX86MaskVec(Builder, CI->getArgOperand(2),
2562 cast<FixedVectorType>(ResultTy)->getNumElements());
2563
2564 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2565 Intrinsic::masked_compressstore,
2566 ResultTy);
2567 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2568 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
2569 Name.startswith("avx512.mask.expand."))) {
2570 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2571
2572 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2573 ResultTy->getNumElements());
2574
2575 bool IsCompress = Name[12] == 'c';
2576 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2577 : Intrinsic::x86_avx512_mask_expand;
2578 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2579 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2580 MaskVec });
2581 } else if (IsX86 && Name.startswith("xop.vpcom")) {
2582 bool IsSigned;
2583 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") ||
2584 Name.endswith("uq"))
2585 IsSigned = false;
2586 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") ||
2587 Name.endswith("q"))
2588 IsSigned = true;
2589 else
2590 llvm_unreachable("Unknown suffix");
2591
2592 unsigned Imm;
2593 if (CI->arg_size() == 3) {
2594 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2595 } else {
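// The two-operand forms encode the predicate in the name; translate it to
// the VPCOM immediate encoding (0=lt, 1=le, 2=gt, 3=ge, 4=eq, 5=ne,
// 6=false, 7=true) expected by upgradeX86vpcom.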
2596 Name = Name.substr(9); // strip off "xop.vpcom"
2597 if (Name.startswith("lt"))
2598 Imm = 0;
2599 else if (Name.startswith("le"))
2600 Imm = 1;
2601 else if (Name.startswith("gt"))
2602 Imm = 2;
2603 else if (Name.startswith("ge"))
2604 Imm = 3;
2605 else if (Name.startswith("eq"))
2606 Imm = 4;
2607 else if (Name.startswith("ne"))
2608 Imm = 5;
2609 else if (Name.startswith("false"))
2610 Imm = 6;
2611 else if (Name.startswith("true"))
2612 Imm = 7;
2613 else
2614 llvm_unreachable("Unknown condition");
2615 }
2616
2617 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2618 } else if (IsX86 && Name.startswith("xop.vpcmov")) {
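// vpcmov is a plain bitwise select, (Op0 & Sel) | (Op1 & ~Sel), so it needs
// no intrinsic at all.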
2619 Value *Sel = CI->getArgOperand(2);
2620 Value *NotSel = Builder.CreateNot(Sel);
2621 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
2622 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
2623 Rep = Builder.CreateOr(Sel0, Sel1);
2624 } else if (IsX86 && (Name.startswith("xop.vprot") ||
2625 Name.startswith("avx512.prol") ||
2626 Name.startswith("avx512.mask.prol"))) {
2627 Rep = upgradeX86Rotate(Builder, *CI, false);
2628 } else if (IsX86 && (Name.startswith("avx512.pror") ||
2629 Name.startswith("avx512.mask.pror"))) {
2630 Rep = upgradeX86Rotate(Builder, *CI, true);
2631 } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
2632 Name.startswith("avx512.mask.vpshld") ||
2633 Name.startswith("avx512.maskz.vpshld"))) {
2634 bool ZeroMask = Name[11] == 'z';
2635 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
2636 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
2637 Name.startswith("avx512.mask.vpshrd") ||
2638 Name.startswith("avx512.maskz.vpshrd"))) {
2639 bool ZeroMask = Name[11] == 'z';
2640 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
2641 } else if (IsX86 && Name == "sse42.crc32.64.8") {
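// The 64-bit accumulator variant never produces more than 32 significant
// bits, so truncate to the 32-bit intrinsic and zero-extend the result.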
2642 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
2643 Intrinsic::x86_sse42_crc32_32_8);
2644 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
2645 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
2646 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
2647 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
2648 Name.startswith("avx512.vbroadcast.s"))) {
2649 // Replace broadcasts with a series of insertelements.
2650 auto *VecTy = cast<FixedVectorType>(CI->getType());
2651 Type *EltTy = VecTy->getElementType();
2652 unsigned EltNum = VecTy->getNumElements();
2653 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
2654 EltTy->getPointerTo());
2655 Value *Load = Builder.CreateLoad(EltTy, Cast);
2656 Type *I32Ty = Type::getInt32Ty(C);
2657 Rep = PoisonValue::get(VecTy);
2658 for (unsigned I = 0; I < EltNum; ++I)
2659 Rep = Builder.CreateInsertElement(Rep, Load,
2660 ConstantInt::get(I32Ty, I));
2661 } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
2662 Name.startswith("sse41.pmovzx") ||
2663 Name.startswith("avx2.pmovsx") ||
2664 Name.startswith("avx2.pmovzx") ||
2665 Name.startswith("avx512.mask.pmovsx") ||
2666 Name.startswith("avx512.mask.pmovzx"))) {
2667 auto *DstTy = cast<FixedVectorType>(CI->getType());
2668 unsigned NumDstElts = DstTy->getNumElements();
2669
2670 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
2671 SmallVector<int, 8> ShuffleMask(NumDstElts);
2672 for (unsigned i = 0; i != NumDstElts; ++i)
2673 ShuffleMask[i] = i;
2674
2675 Value *SV =
2676 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
2677
2678 bool DoSext = (StringRef::npos != Name.find("pmovsx"));
2679 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
2680 : Builder.CreateZExt(SV, DstTy);
2681 // If there are 3 arguments, it's a masked intrinsic so we need a select.
2682 if (CI->arg_size() == 3)
2683 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2684 CI->getArgOperand(1));
2685 } else if (Name == "avx512.mask.pmov.qd.256" ||
2686 Name == "avx512.mask.pmov.qd.512" ||
2687 Name == "avx512.mask.pmov.wb.256" ||
2688 Name == "avx512.mask.pmov.wb.512") {
2689 Type *Ty = CI->getArgOperand(1)->getType();
2690 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
2691 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2692 CI->getArgOperand(1));
2693 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
2694 Name == "avx2.vbroadcasti128")) {
2695 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
2696 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
2697 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
2698 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
2699 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
2700 PointerType::getUnqual(VT));
2701 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
2702 if (NumSrcElts == 2)
2703 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
2704 else
2705 Rep = Builder.CreateShuffleVector(
2706 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
2707 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
2708 Name.startswith("avx512.mask.shuf.f"))) {
2709 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2710 Type *VT = CI->getType();
2711 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
2712 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
2713 unsigned ControlBitsMask = NumLanes - 1;
2714 unsigned NumControlBits = NumLanes / 2;
2715 SmallVector<int, 8> ShuffleMask(0);
2716
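// Each NumControlBits-wide field of Imm picks a 128-bit lane; e.g. a
// 512-bit result has NumLanes == 4, so the two low 2-bit fields select
// lanes from the first source and the two high fields from the second.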
2717 for (unsigned l = 0; l != NumLanes; ++l) {
2718 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
2719 // We actually need the other source.
2720 if (l >= NumLanes / 2)
2721 LaneMask += NumLanes;
2722 for (unsigned i = 0; i != NumElementsInLane; ++i)
2723 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
2724 }
2725 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2726 CI->getArgOperand(1), ShuffleMask);
2727 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2728 CI->getArgOperand(3));
2729 } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
2730 Name.startswith("avx512.mask.broadcasti"))) {
2731 unsigned NumSrcElts =
2732 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
2733 ->getNumElements();
2734 unsigned NumDstElts =
2735 cast<FixedVectorType>(CI->getType())->getNumElements();
2736
2737 SmallVector<int, 8> ShuffleMask(NumDstElts);
2738 for (unsigned i = 0; i != NumDstElts; ++i)
2739 ShuffleMask[i] = i % NumSrcElts;
2740
2741 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2742 CI->getArgOperand(0),
2743 ShuffleMask);
2744 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2745 CI->getArgOperand(1));
2746 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
2747 Name.startswith("avx2.vbroadcast") ||
2748 Name.startswith("avx512.pbroadcast") ||
2749 Name.startswith("avx512.mask.broadcast.s"))) {
2750 // Replace vp?broadcasts with a vector shuffle.
2751 Value *Op = CI->getArgOperand(0);
2752 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
2753 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
2754 SmallVector<int, 8> M;
2755 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
2756 Rep = Builder.CreateShuffleVector(Op, M);
2757
2758 if (CI->arg_size() == 3)
2759 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2760 CI->getArgOperand(1));
2761 } else if (IsX86 && (Name.startswith("sse2.padds.") ||
2762 Name.startswith("avx2.padds.") ||
2763 Name.startswith("avx512.padds.") ||
2764 Name.startswith("avx512.mask.padds."))) {
2765 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
2766 } else if (IsX86 && (Name.startswith("sse2.psubs.") ||
2767 Name.startswith("avx2.psubs.") ||
2768 Name.startswith("avx512.psubs.") ||
2769 Name.startswith("avx512.mask.psubs."))) {
2770 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
2771 } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
2772 Name.startswith("avx2.paddus.") ||
2773 Name.startswith("avx512.mask.paddus."))) {
2774 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
2775 } else if (IsX86 && (Name.startswith("sse2.psubus.") ||
2776 Name.startswith("avx2.psubus.") ||
2777 Name.startswith("avx512.mask.psubus."))) {
2778 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
2779 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
2780 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2781 CI->getArgOperand(1),
2782 CI->getArgOperand(2),
2783 CI->getArgOperand(3),
2784 CI->getArgOperand(4),
2785 false);
2786 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
2787 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2788 CI->getArgOperand(1),
2789 CI->getArgOperand(2),
2790 CI->getArgOperand(3),
2791 CI->getArgOperand(4),
2792 true);
2793 } else if (IsX86 && (Name == "sse2.psll.dq" ||
2794 Name == "avx2.psll.dq")) {
2795 // 128/256-bit shift left specified in bits.
2796 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2797 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
2798 Shift / 8); // Shift is in bits.
2799 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
2800 Name == "avx2.psrl.dq")) {
2801 // 128/256-bit shift right specified in bits.
2802 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2803 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
2804 Shift / 8); // Shift is in bits.
2805 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
2806 Name == "avx2.psll.dq.bs" ||
2807 Name == "avx512.psll.dq.512")) {
2808 // 128/256/512-bit shift left specified in bytes.
2809 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2810 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2811 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
2812 Name == "avx2.psrl.dq.bs" ||
2813 Name == "avx512.psrl.dq.512")) {
2814 // 128/256/512-bit shift right specified in bytes.
2815 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2816 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2817 } else if (IsX86 && (Name == "sse41.pblendw" ||
2818 Name.startswith("sse41.blendp") ||
2819 Name.startswith("avx.blend.p") ||
2820 Name == "avx2.pblendw" ||
2821 Name.startswith("avx2.pblendd."))) {
2822 Value *Op0 = CI->getArgOperand(0);
2823 Value *Op1 = CI->getArgOperand(1);
2824 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2825 auto *VecTy = cast<FixedVectorType>(CI->getType());
2826 unsigned NumElts = VecTy->getNumElements();
2827
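// Bit (i % 8) of the immediate selects the element from the second source;
// the modulo lets the 8-bit immediate drive both halves of a 256-bit blend.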
2828 SmallVector<int, 16> Idxs(NumElts);
2829 for (unsigned i = 0; i != NumElts; ++i)
2830 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
2831
2832 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2833 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2834 Name == "avx2.vinserti128" ||
2835 Name.startswith("avx512.mask.insert"))) {
2836 Value *Op0 = CI->getArgOperand(0);
2837 Value *Op1 = CI->getArgOperand(1);
2838 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2839 unsigned DstNumElts =
2840 cast<FixedVectorType>(CI->getType())->getNumElements();
2841 unsigned SrcNumElts =
2842 cast<FixedVectorType>(Op1->getType())->getNumElements();
2843 unsigned Scale = DstNumElts / SrcNumElts;
2844
2845 // Mask off the high bits of the immediate value; hardware ignores those.
2846 Imm = Imm % Scale;
2847
2848 // Extend the second operand into a vector the size of the destination.
2849 SmallVector<int, 8> Idxs(DstNumElts);
2850 for (unsigned i = 0; i != SrcNumElts; ++i)
2851 Idxs[i] = i;
2852 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2853 Idxs[i] = SrcNumElts;
2854 Rep = Builder.CreateShuffleVector(Op1, Idxs);
2855
2856 // Insert the second operand into the first operand.
2857
2858 // Note that there is no guarantee that instruction lowering will actually
2859 // produce a vinsertf128 instruction for the created shuffles. In
2860 // particular, the 0 immediate case involves no lane changes, so it can
2861 // be handled as a blend.
2862
2863 // Example of shuffle mask for 32-bit elements:
2864 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2865 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
2866
2867 // First fill with identity mask.
2868 for (unsigned i = 0; i != DstNumElts; ++i)
2869 Idxs[i] = i;
2870 // Then replace the elements where we need to insert.
2871 for (unsigned i = 0; i != SrcNumElts; ++i)
2872 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2873 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2874
2875 // If the intrinsic has a mask operand, handle that.
2876 if (CI->arg_size() == 5)
2877 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2878 CI->getArgOperand(3));
2879 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2880 Name == "avx2.vextracti128" ||
2881 Name.startswith("avx512.mask.vextract"))) {
2882 Value *Op0 = CI->getArgOperand(0);
2883 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2884 unsigned DstNumElts =
2885 cast<FixedVectorType>(CI->getType())->getNumElements();
2886 unsigned SrcNumElts =
2887 cast<FixedVectorType>(Op0->getType())->getNumElements();
2888 unsigned Scale = SrcNumElts / DstNumElts;
2889
2890 // Mask off the high bits of the immediate value; hardware ignores those.
2891 Imm = Imm % Scale;
2892
2893 // Get indexes for the subvector of the input vector.
2894 SmallVector<int, 8> Idxs(DstNumElts);
2895 for (unsigned i = 0; i != DstNumElts; ++i) {
2896 Idxs[i] = i + (Imm * DstNumElts);
2897 }
2898 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2899
2900 // If the intrinsic has a mask operand, handle that.
2901 if (CI->arg_size() == 4)
2902 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2903 CI->getArgOperand(2));
2904 } else if (!IsX86 && Name == "stackprotectorcheck") {
2905 Rep = nullptr;
2906 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
2907 Name.startswith("avx512.mask.perm.di."))) {
2908 Value *Op0 = CI->getArgOperand(0);
2909 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2910 auto *VecTy = cast<FixedVectorType>(CI->getType());
2911 unsigned NumElts = VecTy->getNumElements();
2912
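// vpermq/vpermpd shuffle within each group of four 64-bit elements: the
// 2-bit immediate field for position (i & 3) picks the source element.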
2913 SmallVector<int, 8> Idxs(NumElts);
2914 for (unsigned i = 0; i != NumElts; ++i)
2915 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
2916
2917 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2918
2919 if (CI->arg_size() == 4)
2920 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2921 CI->getArgOperand(2));
2922 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
2923 Name == "avx2.vperm2i128")) {
2924 // The immediate permute control byte looks like this:
2925 // [1:0] - select 128 bits from sources for low half of destination
2926 // [2] - ignore
2927 // [3] - zero low half of destination
2928 // [5:4] - select 128 bits from sources for high half of destination
2929 // [6] - ignore
2930 // [7] - zero high half of destination
2931
2932 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2933
2934 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2935 unsigned HalfSize = NumElts / 2;
2936 SmallVector<int, 8> ShuffleMask(NumElts);
2937
2938 // Determine which operand(s) are actually in use for this instruction.
2939 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2940 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2941
2942 // If needed, replace operands based on zero mask.
2943 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
2944 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
2945
2946 // Permute low half of result.
2947 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
2948 for (unsigned i = 0; i < HalfSize; ++i)
2949 ShuffleMask[i] = StartIndex + i;
2950
2951 // Permute high half of result.
2952 StartIndex = (Imm & 0x10) ? HalfSize : 0;
2953 for (unsigned i = 0; i < HalfSize; ++i)
2954 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
2955
2956 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2957
2958 } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
2959 Name == "sse2.pshuf.d" ||
2960 Name.startswith("avx512.mask.vpermil.p") ||
2961 Name.startswith("avx512.mask.pshuf.d."))) {
2962 Value *Op0 = CI->getArgOperand(0);
2963 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2964 auto *VecTy = cast<FixedVectorType>(CI->getType());
2965 unsigned NumElts = VecTy->getNumElements();
2966 // Calculate the size of each index in the immediate.
2967 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
2968 unsigned IdxMask = ((1 << IdxSize) - 1);
2969
2970 SmallVector<int, 8> Idxs(NumElts);
2971 // Lookup the bits for this element, wrapping around the immediate every
2972 // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need
2973 // to offset by the first index of each group.
2974 for (unsigned i = 0; i != NumElts; ++i)
2975 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
2976
2977 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2978
2979 if (CI->arg_size() == 4)
2980 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2981 CI->getArgOperand(2));
2982 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
2983 Name.startswith("avx512.mask.pshufl.w."))) {
2984 Value *Op0 = CI->getArgOperand(0);
2985 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2986 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2987
2988 SmallVector<int, 16> Idxs(NumElts);
2989 for (unsigned l = 0; l != NumElts; l += 8) {
2990 for (unsigned i = 0; i != 4; ++i)
2991 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
2992 for (unsigned i = 4; i != 8; ++i)
2993 Idxs[i + l] = i + l;
2994 }
2995
2996 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2997
2998 if (CI->arg_size() == 4)
2999 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3000 CI->getArgOperand(2));
3001 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
3002 Name.startswith("avx512.mask.pshufh.w."))) {
3003 Value *Op0 = CI->getArgOperand(0);
3004 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3005 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3006
3007 SmallVector<int, 16> Idxs(NumElts);
3008 for (unsigned l = 0; l != NumElts; l += 8) {
3009 for (unsigned i = 0; i != 4; ++i)
3010 Idxs[i + l] = i + l;
3011 for (unsigned i = 0; i != 4; ++i)
3012 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
3013 }
3014
3015 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3016
3017 if (CI->arg_size() == 4)
3018 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3019 CI->getArgOperand(2));
3020 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
3021 Value *Op0 = CI->getArgOperand(0);
3022 Value *Op1 = CI->getArgOperand(1);
3023 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3024 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3025
3026 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3027 unsigned HalfLaneElts = NumLaneElts / 2;
3028
3029 SmallVector<int, 16> Idxs(NumElts);
3030 for (unsigned i = 0; i != NumElts; ++i) {
3031 // Base index is the starting element of the lane.
3032 Idxs[i] = i - (i % NumLaneElts);
3033 // If we are halfway through the lane, switch to the other source.
3034 if ((i % NumLaneElts) >= HalfLaneElts)
3035 Idxs[i] += NumElts;
3036 // Now select the specific element by adding HalfLaneElts bits from
3037 // the immediate, wrapping around the immediate every 8 bits.
3038 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
3039 }
3040
3041 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3042
3043 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
3044 CI->getArgOperand(3));
3045 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
3046 Name.startswith("avx512.mask.movshdup") ||
3047 Name.startswith("avx512.mask.movsldup"))) {
3048 Value *Op0 = CI->getArgOperand(0);
3049 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3050 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3051
3052 unsigned Offset = 0;
3053 if (Name.startswith("avx512.mask.movshdup."))
3054 Offset = 1;
3055
3056 SmallVector<int, 16> Idxs(NumElts);
3057 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
3058 for (unsigned i = 0; i != NumLaneElts; i += 2) {
3059 Idxs[i + l + 0] = i + l + Offset;
3060 Idxs[i + l + 1] = i + l + Offset;
3061 }
3062
3063 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3064
3065 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3066 CI->getArgOperand(1));
3067 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
3068 Name.startswith("avx512.mask.unpckl."))) {
3069 Value *Op0 = CI->getArgOperand(0);
3070 Value *Op1 = CI->getArgOperand(1);
3071 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3072 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3073
3074 SmallVector<int, 64> Idxs(NumElts);
3075 for (int l = 0; l != NumElts; l += NumLaneElts)
3076 for (int i = 0; i != NumLaneElts; ++i)
3077 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
3078
3079 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3080
3081 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3082 CI->getArgOperand(2));
3083 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
3084 Name.startswith("avx512.mask.unpckh."))) {
3085 Value *Op0 = CI->getArgOperand(0);
3086 Value *Op1 = CI->getArgOperand(1);
3087 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3088 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3089
3090 SmallVector<int, 64> Idxs(NumElts);
3091 for (int l = 0; l != NumElts; l += NumLaneElts)
3092 for (int i = 0; i != NumLaneElts; ++i)
3093 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
3094
3095 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3096
3097 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3098 CI->getArgOperand(2));
3099 } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
3100 Name.startswith("avx512.mask.pand."))) {
3101 VectorType *FTy = cast<VectorType>(CI->getType());
3102 VectorType *ITy = VectorType::getInteger(FTy);
3103 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3104 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3105 Rep = Builder.CreateBitCast(Rep, FTy);
3106 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3107 CI->getArgOperand(2));
3108 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
3109 Name.startswith("avx512.mask.pandn."))) {
3110 VectorType *FTy = cast<VectorType>(CI->getType());
3111 VectorType *ITy = VectorType::getInteger(FTy);
3112 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
3113 Rep = Builder.CreateAnd(Rep,
3114 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3115 Rep = Builder.CreateBitCast(Rep, FTy);
3116 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3117 CI->getArgOperand(2));
3118 } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
3119 Name.startswith("avx512.mask.por."))) {
3120 VectorType *FTy = cast<VectorType>(CI->getType());
3121 VectorType *ITy = VectorType::getInteger(FTy);
3122 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3123 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3124 Rep = Builder.CreateBitCast(Rep, FTy);
3125 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3126 CI->getArgOperand(2));
3127 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
3128 Name.startswith("avx512.mask.pxor."))) {
3129 VectorType *FTy = cast<VectorType>(CI->getType());
3130 VectorType *ITy = VectorType::getInteger(FTy);
3131 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3132 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3133 Rep = Builder.CreateBitCast(Rep, FTy);
3134 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3135 CI->getArgOperand(2));
3136 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
3137 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3138 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3139 CI->getArgOperand(2));
3140 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
3141 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3142 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3143 CI->getArgOperand(2));
3144 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
3145 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3146 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3147 CI->getArgOperand(2));
3148 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
3149 if (Name.endswith(".512")) {
3150 Intrinsic::ID IID;
3151 if (Name[17] == 's')
3152 IID = Intrinsic::x86_avx512_add_ps_512;
3153 else
3154 IID = Intrinsic::x86_avx512_add_pd_512;
3155
3156 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3157 { CI->getArgOperand(0), CI->getArgOperand(1),
3158 CI->getArgOperand(4) });
3159 } else {
3160 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3161 }
3162 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3163 CI->getArgOperand(2));
3164 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
3165 if (Name.endswith(".512")) {
3166 Intrinsic::ID IID;
3167 if (Name[17] == 's')
3168 IID = Intrinsic::x86_avx512_div_ps_512;
3169 else
3170 IID = Intrinsic::x86_avx512_div_pd_512;
3171
3172 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3173 { CI->getArgOperand(0), CI->getArgOperand(1),
3174 CI->getArgOperand(4) });
3175 } else {
3176 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3177 }
3178 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3179 CI->getArgOperand(2));
3180 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
3181 if (Name.endswith(".512")) {
3182 Intrinsic::ID IID;
3183 if (Name[17] == 's')
3184 IID = Intrinsic::x86_avx512_mul_ps_512;
3185 else
3186 IID = Intrinsic::x86_avx512_mul_pd_512;
3187
3188 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3189 { CI->getArgOperand(0), CI->getArgOperand(1),
3190 CI->getArgOperand(4) });
3191 } else {
3192 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3193 }
3194 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3195 CI->getArgOperand(2));
3196 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
3197 if (Name.endswith(".512")) {
3198 Intrinsic::ID IID;
3199 if (Name[17] == 's')
3200 IID = Intrinsic::x86_avx512_sub_ps_512;
3201 else
3202 IID = Intrinsic::x86_avx512_sub_pd_512;
3203
3204 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3205 { CI->getArgOperand(0), CI->getArgOperand(1),
3206 CI->getArgOperand(4) });
3207 } else {
3208 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3209 }
3210 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3211 CI->getArgOperand(2));
3212 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
3213 Name.startswith("avx512.mask.min.p")) &&
3214 Name.drop_front(18) == ".512") {
3215 bool IsDouble = Name[17] == 'd';
3216 bool IsMin = Name[13] == 'i';
3217 static const Intrinsic::ID MinMaxTbl[2][2] = {
3218 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3219 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3220 };
3221 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3222
3223 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3224 { CI->getArgOperand(0), CI->getArgOperand(1),
3225 CI->getArgOperand(4) });
3226 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3227 CI->getArgOperand(2));
3228 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
3229 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3230 Intrinsic::ctlz,
3231 CI->getType()),
3232 { CI->getArgOperand(0), Builder.getInt1(false) });
3233 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3234 CI->getArgOperand(1));
3235 } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
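// Name is "avx512.mask.psll...", so index 16 holds the character after
// "psll": '.' for psll.*, 'i' for pslli.*, or 'v' for the variable-shift
// psllv* forms; Size is the element-type letter that follows.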
3236 bool IsImmediate = Name[16] == 'i' ||
3237 (Name.size() > 18 && Name[18] == 'i');
3238 bool IsVariable = Name[16] == 'v';
3239 char Size = Name[16] == '.' ? Name[17] :
3240 Name[17] == '.' ? Name[18] :
3241 Name[18] == '.' ? Name[19] :
3242 Name[20];
3243
3244 Intrinsic::ID IID;
3245 if (IsVariable && Name[17] != '.') {
3246 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3247 IID = Intrinsic::x86_avx2_psllv_q;
3248 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3249 IID = Intrinsic::x86_avx2_psllv_q_256;
3250 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3251 IID = Intrinsic::x86_avx2_psllv_d;
3252 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3253 IID = Intrinsic::x86_avx2_psllv_d_256;
3254 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3255 IID = Intrinsic::x86_avx512_psllv_w_128;
3256 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3257 IID = Intrinsic::x86_avx512_psllv_w_256;
3258 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3259 IID = Intrinsic::x86_avx512_psllv_w_512;
3260 else
3261 llvm_unreachable("Unexpected size");
3262 } else if (Name.endswith(".128")) {
3263 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3264 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3265 : Intrinsic::x86_sse2_psll_d;
3266 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3267 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3268 : Intrinsic::x86_sse2_psll_q;
3269 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3270 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3271 : Intrinsic::x86_sse2_psll_w;
3272 else
3273 llvm_unreachable("Unexpected size");
3274 } else if (Name.endswith(".256")) {
3275 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3276 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3277 : Intrinsic::x86_avx2_psll_d;
3278 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3279 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3280 : Intrinsic::x86_avx2_psll_q;
3281 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3282 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3283 : Intrinsic::x86_avx2_psll_w;
3284 else
3285 llvm_unreachable("Unexpected size");
3286 } else {
3287 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3288 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3289 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3290 Intrinsic::x86_avx512_psll_d_512;
3291 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3292 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3293 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3294 Intrinsic::x86_avx512_psll_q_512;
3295 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3296 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3297 : Intrinsic::x86_avx512_psll_w_512;
3298 else
3299 llvm_unreachable("Unexpected size");
3300 }
3301
3302 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3303 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
3304 bool IsImmediate = Name[16] == 'i' ||
3305 (Name.size() > 18 && Name[18] == 'i');
3306 bool IsVariable = Name[16] == 'v';
3307 char Size = Name[16] == '.' ? Name[17] :
3308 Name[17] == '.' ? Name[18] :
3309 Name[18] == '.' ? Name[19] :
3310 Name[20];
3311
3312 Intrinsic::ID IID;
3313 if (IsVariable && Name[17] != '.') {
3314 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3315 IID = Intrinsic::x86_avx2_psrlv_q;
3316 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3317 IID = Intrinsic::x86_avx2_psrlv_q_256;
3318 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3319 IID = Intrinsic::x86_avx2_psrlv_d;
3320 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3321 IID = Intrinsic::x86_avx2_psrlv_d_256;
3322 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3323 IID = Intrinsic::x86_avx512_psrlv_w_128;
3324 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3325 IID = Intrinsic::x86_avx512_psrlv_w_256;
3326 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3327 IID = Intrinsic::x86_avx512_psrlv_w_512;
3328 else
3329 llvm_unreachable("Unexpected size");
3330 } else if (Name.endswith(".128")) {
3331 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3332 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3333 : Intrinsic::x86_sse2_psrl_d;
3334 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3335 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3336 : Intrinsic::x86_sse2_psrl_q;
3337 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3338 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3339 : Intrinsic::x86_sse2_psrl_w;
3340 else
3341 llvm_unreachable("Unexpected size");
3342 } else if (Name.endswith(".256")) {
3343 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3344 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3345 : Intrinsic::x86_avx2_psrl_d;
3346 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3347 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3348 : Intrinsic::x86_avx2_psrl_q;
3349 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3350 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3351 : Intrinsic::x86_avx2_psrl_w;
3352 else
3353 llvm_unreachable("Unexpected size");
3354 } else {
3355 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
3356 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3357 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3358 Intrinsic::x86_avx512_psrl_d_512;
3359 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
3360 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3361 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3362 Intrinsic::x86_avx512_psrl_q_512;
3363 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
3364 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3365 : Intrinsic::x86_avx512_psrl_w_512;
3366 else
3367 llvm_unreachable("Unexpected size");
3368 }
3369
3370 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3371 } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
3372 bool IsImmediate = Name[16] == 'i' ||
3373 (Name.size() > 18 && Name[18] == 'i');
3374 bool IsVariable = Name[16] == 'v';
3375 char Size = Name[16] == '.' ? Name[17] :
3376 Name[17] == '.' ? Name[18] :
3377 Name[18] == '.' ? Name[19] :
3378 Name[20];
3379
3380 Intrinsic::ID IID;
3381 if (IsVariable && Name[17] != '.') {
3382 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3383 IID = Intrinsic::x86_avx2_psrav_d;
3384 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3385 IID = Intrinsic::x86_avx2_psrav_d_256;
3386 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3387 IID = Intrinsic::x86_avx512_psrav_w_128;
3388 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3389 IID = Intrinsic::x86_avx512_psrav_w_256;
3390 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3391 IID = Intrinsic::x86_avx512_psrav_w_512;
3392 else
3393 llvm_unreachable("Unexpected size");
3394 } else if (Name.endswith(".128")) {
3395 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3396 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3397 : Intrinsic::x86_sse2_psra_d;
3398 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3399 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3400 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3401 Intrinsic::x86_avx512_psra_q_128;
3402 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3403 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3404 : Intrinsic::x86_sse2_psra_w;
3405 else
3406 llvm_unreachable("Unexpected size");
3407 } else if (Name.endswith(".256")) {
3408 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3409 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3410 : Intrinsic::x86_avx2_psra_d;
3411 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3412 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3413 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3414 Intrinsic::x86_avx512_psra_q_256;
3415 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3416 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3417 : Intrinsic::x86_avx2_psra_w;
3418 else
3419 llvm_unreachable("Unexpected size");
3420 } else {
3421 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3422 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3423 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3424 Intrinsic::x86_avx512_psra_d_512;
3425 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
3426 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3427 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3428 Intrinsic::x86_avx512_psra_q_512;
3429 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3430 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3431 : Intrinsic::x86_avx512_psra_w_512;
3432 else
3433 llvm_unreachable("Unexpected size");
3434 }
3435
3436 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3437 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
3438 Rep = upgradeMaskedMove(Builder, *CI);
3439 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
3440 Rep = UpgradeMaskToInt(Builder, *CI);
3441 } else if (IsX86 && Name.endswith(".movntdqa")) {
3442 Module *M = F->getParent();
3443 MDNode *Node = MDNode::get(
3444 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3445
3446 Value *Ptr = CI->getArgOperand(0);
3447
3448 // Convert the type of the pointer to a pointer to the stored type.
3449 Value *BC = Builder.CreateBitCast(
3450 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3451 LoadInst *LI = Builder.CreateAlignedLoad(
3452 CI->getType(), BC,
3453 Align(CI->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
3454 LI->setMetadata(M->getMDKindID("nontemporal"), Node);
3455 Rep = LI;
3456 } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
3457 Name.startswith("fma.vfmsub.") ||
3458 Name.startswith("fma.vfnmadd.") ||
3459 Name.startswith("fma.vfnmsub."))) {
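// The name has the form "fma.vf[n]m{add,sub}.{s,p}{s,d}", so the positions
// of the 's' and scalar/packed letters shift by one when 'n' is present.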
3460 bool NegMul = Name[6] == 'n';
3461 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3462 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
3463
3464 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3465 CI->getArgOperand(2) };
3466
3467 if (IsScalar) {
3468 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3469 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3470 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3471 }
3472
3473 if (NegMul && !IsScalar)
3474 Ops[0] = Builder.CreateFNeg(Ops[0]);
3475 if (NegMul && IsScalar)
3476 Ops[1] = Builder.CreateFNeg(Ops[1]);
3477 if (NegAcc)
3478 Ops[2] = Builder.CreateFNeg(Ops[2]);
3479
3480 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3481 Intrinsic::fma,
3482 Ops[0]->getType()),
3483 Ops);
3484
3485 if (IsScalar)
3486 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3487 (uint64_t)0);
3488 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
3489 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3490 CI->getArgOperand(2) };
3491
3492 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3493 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3494 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3495
3496 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3497 Intrinsic::fma,
3498 Ops[0]->getType()),
3499 Ops);
3500
3501 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3502 Rep, (uint64_t)0);
3503 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
3504 Name.startswith("avx512.maskz.vfmadd.s") ||
3505 Name.startswith("avx512.mask3.vfmadd.s") ||
3506 Name.startswith("avx512.mask3.vfmsub.s") ||
3507 Name.startswith("avx512.mask3.vfnmsub.s"))) {
3508 bool IsMask3 = Name[11] == '3';
3509 bool IsMaskZ = Name[11] == 'z';
3510 // Drop the "avx512.mask." to make it easier.
3511 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3512 bool NegMul = Name[2] == 'n';
3513 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3514
3515 Value *A = CI->getArgOperand(0);
3516 Value *B = CI->getArgOperand(1);
3517 Value *C = CI->getArgOperand(2);
3518
3519 if (NegMul && (IsMask3 || IsMaskZ))
3520 A = Builder.CreateFNeg(A);
3521 if (NegMul && !(IsMask3 || IsMaskZ))
3522 B = Builder.CreateFNeg(B);
3523 if (NegAcc)
3524 C = Builder.CreateFNeg(C);
3525
3526 A = Builder.CreateExtractElement(A, (uint64_t)0);
3527 B = Builder.CreateExtractElement(B, (uint64_t)0);
3528 C = Builder.CreateExtractElement(C, (uint64_t)0);
3529
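// A constant rounding operand of 4 (CUR_DIRECTION) lowers to the generic
// llvm.fma; any other value must keep the rounding-aware x86 intrinsic.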
3530 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3531 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3532 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3533
3534 Intrinsic::ID IID;
3535 if (Name.back() == 'd')
3536 IID = Intrinsic::x86_avx512_vfmadd_f64;
3537 else
3538 IID = Intrinsic::x86_avx512_vfmadd_f32;
3539 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3540 Rep = Builder.CreateCall(FMA, Ops);
3541 } else {
3542 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3543 Intrinsic::fma,
3544 A->getType());
3545 Rep = Builder.CreateCall(FMA, { A, B, C });
3546 }
3547
3548 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3549 IsMask3 ? C : A;
3550
3551 // For Mask3 with NegAcc, we need to create a new extractelement that
3552 // avoids the negation above.
3553 if (NegAcc && IsMask3)
3554 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3555 (uint64_t)0);
3556
3557 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
3558 Rep, PassThru);
3559 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3560 Rep, (uint64_t)0);
3561 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") ||
3562 Name.startswith("avx512.mask.vfnmadd.p") ||
3563 Name.startswith("avx512.mask.vfnmsub.p") ||
3564 Name.startswith("avx512.mask3.vfmadd.p") ||
3565 Name.startswith("avx512.mask3.vfmsub.p") ||
3566 Name.startswith("avx512.mask3.vfnmsub.p") ||
3567 Name.startswith("avx512.maskz.vfmadd.p"))) {
3568 bool IsMask3 = Name[11] == '3';
3569 bool IsMaskZ = Name[11] == 'z';
3570 // Drop the "avx512.mask." to make it easier.
3571 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3572 bool NegMul = Name[2] == 'n';
3573 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3574
3575 Value *A = CI->getArgOperand(0);
3576 Value *B = CI->getArgOperand(1);
3577 Value *C = CI->getArgOperand(2);
3578
3579 if (NegMul && (IsMask3 || IsMaskZ))
3580 A = Builder.CreateFNeg(A);
3581 if (NegMul && !(IsMask3 || IsMaskZ))
3582 B = Builder.CreateFNeg(B);
3583 if (NegAcc)
3584 C = Builder.CreateFNeg(C);
3585
3586 if (CI->arg_size() == 5 &&
3587 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3588 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3589 Intrinsic::ID IID;
3590 // Check the character before ".512" in the string.
3591 if (Name[Name.size()-5] == 's')
3592 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3593 else
3594 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3595
3596 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3597 { A, B, C, CI->getArgOperand(4) });
3598 } else {
3599 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3600 Intrinsic::fma,
3601 A->getType());
3602 Rep = Builder.CreateCall(FMA, { A, B, C });
3603 }
3604
3605 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3606 IsMask3 ? CI->getArgOperand(2) :
3607 CI->getArgOperand(0);
3608
3609 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3610 } else if (IsX86 && Name.startswith("fma.vfmsubadd.p")) {
3611 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3612 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3613 Intrinsic::ID IID;
3614 if (VecWidth == 128 && EltWidth == 32)
3615 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3616 else if (VecWidth == 256 && EltWidth == 32)
3617 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3618 else if (VecWidth == 128 && EltWidth == 64)
3619 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3620 else if (VecWidth == 256 && EltWidth == 64)
3621 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3622 else
3623 llvm_unreachable("Unexpected intrinsic");
3624
3625 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3626 CI->getArgOperand(2) };
3627 Ops[2] = Builder.CreateFNeg(Ops[2]);
3628 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3629 Ops);
3630 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
3631 Name.startswith("avx512.mask3.vfmaddsub.p") ||
3632 Name.startswith("avx512.maskz.vfmaddsub.p") ||
3633 Name.startswith("avx512.mask3.vfmsubadd.p"))) {
3634 bool IsMask3 = Name[11] == '3';
3635 bool IsMaskZ = Name[11] == 'z';
3636 // Drop the "avx512.mask." to make it easier.
3637 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3638 bool IsSubAdd = Name[3] == 's';
3639 if (CI->arg_size() == 5) {
3640 Intrinsic::ID IID;
3641 // Check the character before ".512" in the string.
3642 if (Name[Name.size()-5] == 's')
3643 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
3644 else
3645 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
3646
3647 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3648 CI->getArgOperand(2), CI->getArgOperand(4) };
3649 if (IsSubAdd)
3650 Ops[2] = Builder.CreateFNeg(Ops[2]);
3651
3652 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3653 Ops);
3654 } else {
3655 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3656
3657 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3658 CI->getArgOperand(2) };
3659
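// Without a rounding operand, synthesize addsub semantics directly: odd
// lanes take fma(A, B, C), even lanes fma(A, B, -C), and the shuffle below
// interleaves the two results.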
3660 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
3661 Ops[0]->getType());
3662 Value *Odd = Builder.CreateCall(FMA, Ops);
3663 Ops[2] = Builder.CreateFNeg(Ops[2]);
3664 Value *Even = Builder.CreateCall(FMA, Ops);
3665
3666 if (IsSubAdd)
3667 std::swap(Even, Odd);
3668
3669 SmallVector<int, 32> Idxs(NumElts);
3670 for (int i = 0; i != NumElts; ++i)
3671 Idxs[i] = i + (i % 2) * NumElts;
3672
3673 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
3674 }
3675
3676 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3677 IsMask3 ? CI->getArgOperand(2) :
3678 CI->getArgOperand(0);
3679
3680 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3681 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
3682 Name.startswith("avx512.maskz.pternlog."))) {
3683 bool ZeroMask = Name[11] == 'z';
3684 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3685 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3686 Intrinsic::ID IID;
3687 if (VecWidth == 128 && EltWidth == 32)
3688 IID = Intrinsic::x86_avx512_pternlog_d_128;
3689 else if (VecWidth == 256 && EltWidth == 32)
3690 IID = Intrinsic::x86_avx512_pternlog_d_256;
3691 else if (VecWidth == 512 && EltWidth == 32)
3692 IID = Intrinsic::x86_avx512_pternlog_d_512;
3693 else if (VecWidth == 128 && EltWidth == 64)
3694 IID = Intrinsic::x86_avx512_pternlog_q_128;
3695 else if (VecWidth == 256 && EltWidth == 64)
3696 IID = Intrinsic::x86_avx512_pternlog_q_256;
3697 else if (VecWidth == 512 && EltWidth == 64)
3698 IID = Intrinsic::x86_avx512_pternlog_q_512;
3699 else
3700 llvm_unreachable("Unexpected intrinsic");
3701
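// Operand 3 is the 8-bit truth-table immediate: bit ((a << 2) | (b << 1) | c)
// gives the result for each combination of the three source bits.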
3702 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3703 CI->getArgOperand(2), CI->getArgOperand(3) };
3704 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3705 Args);
3706 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3707 : CI->getArgOperand(0);
3708 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
3709 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
3710 Name.startswith("avx512.maskz.vpmadd52"))) {
3711 bool ZeroMask = Name[11] == 'z';
3712 bool High = Name[20] == 'h' || Name[21] == 'h';
3713 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3714 Intrinsic::ID IID;
3715 if (VecWidth == 128 && !High)
3716 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
3717 else if (VecWidth == 256 && !High)
3718 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
3719 else if (VecWidth == 512 && !High)
3720 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
3721 else if (VecWidth == 128 && High)
3722 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
3723 else if (VecWidth == 256 && High)
3724 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
3725 else if (VecWidth == 512 && High)
3726 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
3727 else
3728 llvm_unreachable("Unexpected intrinsic");
3729
3730 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3731 CI->getArgOperand(2) };
3732 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3733 Args);
3734 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3735 : CI->getArgOperand(0);
3736 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3737 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
3738 Name.startswith("avx512.mask.vpermt2var.") ||
3739 Name.startswith("avx512.maskz.vpermt2var."))) {
3740 bool ZeroMask = Name[11] == 'z';
3741 bool IndexForm = Name[17] == 'i';
3742 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
3743 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
3744 Name.startswith("avx512.maskz.vpdpbusd.") ||
3745 Name.startswith("avx512.mask.vpdpbusds.") ||
3746 Name.startswith("avx512.maskz.vpdpbusds."))) {
3747 bool ZeroMask = Name[11] == 'z';
3748 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3749 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3750 Intrinsic::ID IID;
3751 if (VecWidth == 128 && !IsSaturating)
3752 IID = Intrinsic::x86_avx512_vpdpbusd_128;
3753 else if (VecWidth == 256 && !IsSaturating)
3754 IID = Intrinsic::x86_avx512_vpdpbusd_256;
3755 else if (VecWidth == 512 && !IsSaturating)
3756 IID = Intrinsic::x86_avx512_vpdpbusd_512;
3757 else if (VecWidth == 128 && IsSaturating)
3758 IID = Intrinsic::x86_avx512_vpdpbusds_128;
3759 else if (VecWidth == 256 && IsSaturating)
3760 IID = Intrinsic::x86_avx512_vpdpbusds_256;
3761 else if (VecWidth == 512 && IsSaturating)
3762 IID = Intrinsic::x86_avx512_vpdpbusds_512;
3763 else
3764 llvm_unreachable("Unexpected intrinsic");
3765
3766 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3767 CI->getArgOperand(2) };
3768 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3769 Args);
3770 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3771 : CI->getArgOperand(0);
3772 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3773 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
3774 Name.startswith("avx512.maskz.vpdpwssd.") ||
3775 Name.startswith("avx512.mask.vpdpwssds.") ||
3776 Name.startswith("avx512.maskz.vpdpwssds."))) {
3777 bool ZeroMask = Name[11] == 'z';
3778 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3779 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3780 Intrinsic::ID IID;
3781 if (VecWidth == 128 && !IsSaturating)
3782 IID = Intrinsic::x86_avx512_vpdpwssd_128;
3783 else if (VecWidth == 256 && !IsSaturating)
3784 IID = Intrinsic::x86_avx512_vpdpwssd_256;
3785 else if (VecWidth == 512 && !IsSaturating)
3786 IID = Intrinsic::x86_avx512_vpdpwssd_512;
3787 else if (VecWidth == 128 && IsSaturating)
3788 IID = Intrinsic::x86_avx512_vpdpwssds_128;
3789 else if (VecWidth == 256 && IsSaturating)
3790 IID = Intrinsic::x86_avx512_vpdpwssds_256;
3791 else if (VecWidth == 512 && IsSaturating)
3792 IID = Intrinsic::x86_avx512_vpdpwssds_512;
3793 else
3794 llvm_unreachable("Unexpected intrinsic");
3795
3796 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3797 CI->getArgOperand(2) };
3798 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3799 Args);
3800 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3801 : CI->getArgOperand(0);
3802 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3803 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
3804 Name == "addcarry.u32" || Name == "addcarry.u64" ||
3805 Name == "subborrow.u32" || Name == "subborrow.u64")) {
3806 Intrinsic::ID IID;
3807 if (Name[0] == 'a' && Name.back() == '2')
3808 IID = Intrinsic::x86_addcarry_32;
3809 else if (Name[0] == 'a' && Name.back() == '4')
3810 IID = Intrinsic::x86_addcarry_64;
3811 else if (Name[0] == 's' && Name.back() == '2')
3812 IID = Intrinsic::x86_subborrow_32;
3813 else if (Name[0] == 's' && Name.back() == '4')
3814 IID = Intrinsic::x86_subborrow_64;
3815 else
3816 llvm_unreachable("Unexpected intrinsic");
3817
3818 // Make a call with 3 operands.
3819 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3820 CI->getArgOperand(2)};
3821 Value *NewCall = Builder.CreateCall(
3822 Intrinsic::getDeclaration(CI->getModule(), IID),
3823 Args);
3824
3825 // Extract the second result and store it.
3826 Value *Data = Builder.CreateExtractValue(NewCall, 1);
3827 // Cast the pointer to the right type.
3828 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
3829 llvm::PointerType::getUnqual(Data->getType()));
3830 Builder.CreateAlignedStore(Data, Ptr, Align(1));
3831 // Replace the original call result with the first result of the new call.
3832 Value *CF = Builder.CreateExtractValue(NewCall, 0);
3833
3834 CI->replaceAllUsesWith(CF);
3835 Rep = nullptr;
3836 } else if (IsX86 && Name.startswith("avx512.mask.") &&
3837 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
3838 // Rep will be updated by the call in the condition.
3839 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
3840 Value *Arg = CI->getArgOperand(0);
3841 Value *Neg = Builder.CreateNeg(Arg, "neg");
3842 Value *Cmp = Builder.CreateICmpSGE(
3843 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
3844 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
3845 } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
3846 Name.startswith("atomic.load.add.f64.p"))) {
3847 Value *Ptr = CI->getArgOperand(0);
3848 Value *Val = CI->getArgOperand(1);
3849 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
3850 AtomicOrdering::SequentiallyConsistent);
3851 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
3852 Name == "max.ui" || Name == "max.ull")) {
3853 Value *Arg0 = CI->getArgOperand(0);
3854 Value *Arg1 = CI->getArgOperand(1);
3855 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3856 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
3857 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
3858 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
3859 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
3860 Name == "min.ui" || Name == "min.ull")) {
3861 Value *Arg0 = CI->getArgOperand(0);
3862 Value *Arg1 = CI->getArgOperand(1);
3863 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3864 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
3865 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
3866 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
3867 } else if (IsNVVM && Name == "clz.ll") {
3868 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
3869 Value *Arg = CI->getArgOperand(0);
3870 Value *Ctlz = Builder.CreateCall(
3871 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
3872 {Arg->getType()}),
3873 {Arg, Builder.getFalse()}, "ctlz");
3874 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
3875 } else if (IsNVVM && Name == "popc.ll") {
3876 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
3877 // i64.
3878 Value *Arg = CI->getArgOperand(0);
3879 Value *Popc = Builder.CreateCall(
3880 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
3881 {Arg->getType()}),
3882 Arg, "ctpop");
3883 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
3884 } else if (IsNVVM && Name == "h2f") {
3885 Rep = Builder.CreateCall(Intrinsic::getDeclaration(
3886 F->getParent(), Intrinsic::convert_from_fp16,
3887 {Builder.getFloatTy()}),
3888 CI->getArgOperand(0), "h2f");
3889 } else if (IsARM) {
3890 Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder);
3891 } else {
3892 llvm_unreachable("Unknown function for CallBase upgrade.");
3893 }
3894
3895 if (Rep)
3896 CI->replaceAllUsesWith(Rep);
3897 CI->eraseFromParent();
3898 return;
3899 }
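// A sketch (not analyzer output; the legacy signature is assumed) of the
// addcarry/subborrow rewrite earlier in this function: the old 4-operand
// form that stored the sum through its pointer operand,
//   %cf = call i8 @llvm.x86.addcarry.u32(i8 %c, i32 %a, i32 %b, ptr %out)
// becomes a call to the 3-operand {i8, i32}-returning intrinsic plus an
// explicit store of the second result:
//   %pair = call { i8, i32 } @llvm.x86.addcarry.32(i8 %c, i32 %a, i32 %b)
//   %sum  = extractvalue { i8, i32 } %pair, 1
//   store i32 %sum, ptr %out, align 1
//   %cf   = extractvalue { i8, i32 } %pair, 0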
3900
3901 const auto &DefaultCase = [&]() -> void {
3902 if (CI->getFunctionType() == NewFn->getFunctionType()) {
3903 // Handle generic mangling change.
3904 assert(
3905 (CI->getCalledFunction()->getName() != NewFn->getName()) &&
3906 "Unknown function for CallBase upgrade and isn't just a name change");
3907 CI->setCalledFunction(NewFn);
3908 return;
3909 }
3910
3911 // This must be an upgrade from a named to a literal struct.
3912 auto *OldST = cast<StructType>(CI->getType());
3913 assert(OldST != NewFn->getReturnType() && "Return type must have changed");
3914 assert(OldST->getNumElements() ==(static_cast <bool> (OldST->getNumElements() == cast
<StructType>(NewFn->getReturnType())->getNumElements
() && "Must have same number of elements") ? void (0)
: __assert_fail ("OldST->getNumElements() == cast<StructType>(NewFn->getReturnType())->getNumElements() && \"Must have same number of elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3916, __extension__ __PRETTY_FUNCTION__
))
3915 cast<StructType>(NewFn->getReturnType())->getNumElements() &&(static_cast <bool> (OldST->getNumElements() == cast
<StructType>(NewFn->getReturnType())->getNumElements
() && "Must have same number of elements") ? void (0)
: __assert_fail ("OldST->getNumElements() == cast<StructType>(NewFn->getReturnType())->getNumElements() && \"Must have same number of elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3916, __extension__ __PRETTY_FUNCTION__
))
3916 "Must have same number of elements")(static_cast <bool> (OldST->getNumElements() == cast
<StructType>(NewFn->getReturnType())->getNumElements
() && "Must have same number of elements") ? void (0)
: __assert_fail ("OldST->getNumElements() == cast<StructType>(NewFn->getReturnType())->getNumElements() && \"Must have same number of elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3916, __extension__ __PRETTY_FUNCTION__
))
;
3917
3918 SmallVector<Value *> Args(CI->args());
3919 Value *NewCI = Builder.CreateCall(NewFn, Args);
3920 Value *Res = PoisonValue::get(OldST);
3921 for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
3922 Value *Elem = Builder.CreateExtractValue(NewCI, Idx);
3923 Res = Builder.CreateInsertValue(Res, Elem, Idx);
3924 }
3925 CI->replaceAllUsesWith(Res);
3926 CI->eraseFromParent();
3927 return;
3928 };
3929 CallInst *NewCall = nullptr;
3930 switch (NewFn->getIntrinsicID()) {
4
Control jumps to 'case vector_extract:' at line 3971
3931 default: {
3932 DefaultCase();
3933 return;
3934 }
3935 case Intrinsic::arm_neon_vst1:
3936 case Intrinsic::arm_neon_vst2:
3937 case Intrinsic::arm_neon_vst3:
3938 case Intrinsic::arm_neon_vst4:
3939 case Intrinsic::arm_neon_vst2lane:
3940 case Intrinsic::arm_neon_vst3lane:
3941 case Intrinsic::arm_neon_vst4lane: {
3942 SmallVector<Value *, 4> Args(CI->args());
3943 NewCall = Builder.CreateCall(NewFn, Args);
3944 break;
3945 }
3946 case Intrinsic::aarch64_sve_ld3_sret:
3947 case Intrinsic::aarch64_sve_ld4_sret:
3948 case Intrinsic::aarch64_sve_ld2_sret: {
3949 StringRef Name = F->getName();
3950 Name = Name.substr(5);
3951 unsigned N = StringSwitch<unsigned>(Name)
3952 .StartsWith("aarch64.sve.ld2", 2)
3953 .StartsWith("aarch64.sve.ld3", 3)
3954 .StartsWith("aarch64.sve.ld4", 4)
3955 .Default(0);
3956 ScalableVectorType *RetTy =
3957 dyn_cast<ScalableVectorType>(F->getReturnType());
3958 unsigned MinElts = RetTy->getMinNumElements() / N;
3959 SmallVector<Value *, 2> Args(CI->args());
3960 Value *NewLdCall = Builder.CreateCall(NewFn, Args);
3961 Value *Ret = llvm::PoisonValue::get(RetTy);
3962 for (unsigned I = 0; I < N; I++) {
3963 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
3964 Value *SRet = Builder.CreateExtractValue(NewLdCall, I);
3965 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
3966 }
3967 NewCall = dyn_cast<CallInst>(Ret);
3968 break;
3969 }
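// Sketch of the sret repacking above (element counts assumed for
// illustration): @llvm.aarch64.sve.ld2.sret returning
//   { <vscale x 4 x i32>, <vscale x 4 x i32> }
// is flattened by inserting the two parts at element offsets 0 and 4 of
// the <vscale x 8 x i32> value the old intrinsic used to return.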
3970
3971 case Intrinsic::vector_extract: {
3972 StringRef Name = F->getName();
3973 Name = Name.substr(5); // Strip llvm
3974 if (!Name.startswith("aarch64.sve.tuple.get")) {
5
Assuming the condition is false
6
Taking false branch
3975 DefaultCase();
3976 return;
3977 }
3978 ScalableVectorType *RetTy =
8
'RetTy' initialized to a null pointer value
3979 dyn_cast<ScalableVectorType>(F->getReturnType());
7
Assuming the object is not a 'CastReturnType'
3980 unsigned MinElts = RetTy->getMinNumElements();
9
Called C++ object pointer is null
3981 unsigned I = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3982 Value *NewIdx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
3983 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0), NewIdx});
3984 break;
3985 }
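// The warning above fires because the dyn_cast result is dereferenced
// unchecked: an "aarch64.sve.tuple.get" wrapper whose return type is not a
// scalable vector leaves RetTy null. A minimal defensive sketch (an assumed
// guard, not necessarily the fix adopted upstream):
//   auto *RetTy = dyn_cast<ScalableVectorType>(F->getReturnType());
//   if (!RetTy) {
//     DefaultCase();
//     return;
//   }
//   unsigned MinElts = RetTy->getMinNumElements();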
3986
3987 case Intrinsic::vector_insert: {
3988 StringRef Name = F->getName();
3989 Name = Name.substr(5);
3990 if (!Name.startswith("aarch64.sve.tuple")) {
3991 DefaultCase();
3992 return;
3993 }
3994 if (Name.startswith("aarch64.sve.tuple.set")) {
3995 unsigned I = dyn_cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3996 ScalableVectorType *Ty =
3997 dyn_cast<ScalableVectorType>(CI->getArgOperand(2)->getType());
3998 Value *NewIdx =
3999 ConstantInt::get(Type::getInt64Ty(C), I * Ty->getMinNumElements());
4000 NewCall = Builder.CreateCall(
4001 NewFn, {CI->getArgOperand(0), CI->getArgOperand(2), NewIdx});
4002 break;
4003 }
4004 if (Name.startswith("aarch64.sve.tuple.create")) {
4005 unsigned N = StringSwitch<unsigned>(Name)
4006 .StartsWith("aarch64.sve.tuple.create2", 2)
4007 .StartsWith("aarch64.sve.tuple.create3", 3)
4008 .StartsWith("aarch64.sve.tuple.create4", 4)
4009 .Default(0);
4010 assert(N > 1 && "Create is expected to be between 2-4")(static_cast <bool> (N > 1 && "Create is expected to be between 2-4"
) ? void (0) : __assert_fail ("N > 1 && \"Create is expected to be between 2-4\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4010, __extension__ __PRETTY_FUNCTION__
))
;
4011 ScalableVectorType *RetTy =
4012 dyn_cast<ScalableVectorType>(F->getReturnType());
4013 Value *Ret = llvm::PoisonValue::get(RetTy);
4014 unsigned MinElts = RetTy->getMinNumElements() / N;
4015 for (unsigned I = 0; I < N; I++) {
4016 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4017 Value *V = CI->getArgOperand(I);
4018 Ret = Builder.CreateInsertVector(RetTy, Ret, V, Idx);
4019 }
4020 NewCall = dyn_cast<CallInst>(Ret);
4021 }
4022 break;
4023 }
4024
4025 case Intrinsic::arm_neon_bfdot:
4026 case Intrinsic::arm_neon_bfmmla:
4027 case Intrinsic::arm_neon_bfmlalb:
4028 case Intrinsic::arm_neon_bfmlalt:
4029 case Intrinsic::aarch64_neon_bfdot:
4030 case Intrinsic::aarch64_neon_bfmmla:
4031 case Intrinsic::aarch64_neon_bfmlalb:
4032 case Intrinsic::aarch64_neon_bfmlalt: {
4033 SmallVector<Value *, 3> Args;
4034 assert(CI->arg_size() == 3 &&(static_cast <bool> (CI->arg_size() == 3 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 3 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4035, __extension__ __PRETTY_FUNCTION__
))
4035 "Mismatch between function args and call args")(static_cast <bool> (CI->arg_size() == 3 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 3 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4035, __extension__ __PRETTY_FUNCTION__
))
;
4036 size_t OperandWidth =
4037 CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
4038 assert((OperandWidth == 64 || OperandWidth == 128) &&
4039 "Unexpected operand width");
4040 Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
4041 auto Iter = CI->args().begin();
4042 Args.push_back(*Iter++);
4043 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4044 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4045 NewCall = Builder.CreateCall(NewFn, Args);
4046 break;
4047 }
4048
4049 case Intrinsic::bitreverse:
4050 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4051 break;
4052
4053 case Intrinsic::ctlz:
4054 case Intrinsic::cttz:
4055 assert(CI->arg_size() == 1 &&(static_cast <bool> (CI->arg_size() == 1 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 1 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4056, __extension__ __PRETTY_FUNCTION__
))
4056 "Mismatch between function args and call args")(static_cast <bool> (CI->arg_size() == 1 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 1 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4056, __extension__ __PRETTY_FUNCTION__
))
;
4057 NewCall =
4058 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
4059 break;
4060
4061 case Intrinsic::objectsize: {
4062 Value *NullIsUnknownSize =
4063 CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
4064 Value *Dynamic =
4065 CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
4066 NewCall = Builder.CreateCall(
4067 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
4068 break;
4069 }
4070
4071 case Intrinsic::ctpop:
4072 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4073 break;
4074
4075 case Intrinsic::convert_from_fp16:
4076 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4077 break;
4078
4079 case Intrinsic::dbg_value:
4080 // Upgrade from the old version that had an extra offset argument.
4081 assert(CI->arg_size() == 4)(static_cast <bool> (CI->arg_size() == 4) ? void (0)
: __assert_fail ("CI->arg_size() == 4", "llvm/lib/IR/AutoUpgrade.cpp"
, 4081, __extension__ __PRETTY_FUNCTION__))
;
4082 // Drop nonzero offsets instead of attempting to upgrade them.
4083 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
4084 if (Offset->isZeroValue()) {
4085 NewCall = Builder.CreateCall(
4086 NewFn,
4087 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
4088 break;
4089 }
4090 CI->eraseFromParent();
4091 return;
4092
4093 case Intrinsic::ptr_annotation:
4094 // Upgrade from versions that lacked the annotation attribute argument.
4095 if (CI->arg_size() != 4) {
4096 DefaultCase();
4097 return;
4098 }
4099
4100 // Create a new call with an added null annotation attribute argument.
4101 NewCall = Builder.CreateCall(
4102 NewFn,
4103 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
4104 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
4105 NewCall->takeName(CI);
4106 CI->replaceAllUsesWith(NewCall);
4107 CI->eraseFromParent();
4108 return;
4109
4110 case Intrinsic::var_annotation:
4111 // Upgrade from versions that lacked the annotation attribute argument.
4112 assert(CI->arg_size() == 4 &&(static_cast <bool> (CI->arg_size() == 4 && "Before LLVM 12.0 this intrinsic took four arguments"
) ? void (0) : __assert_fail ("CI->arg_size() == 4 && \"Before LLVM 12.0 this intrinsic took four arguments\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4113, __extension__ __PRETTY_FUNCTION__
))
4113 "Before LLVM 12.0 this intrinsic took four arguments")(static_cast <bool> (CI->arg_size() == 4 && "Before LLVM 12.0 this intrinsic took four arguments"
) ? void (0) : __assert_fail ("CI->arg_size() == 4 && \"Before LLVM 12.0 this intrinsic took four arguments\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4113, __extension__ __PRETTY_FUNCTION__
))
;
4114 // Create a new call with an added null annotation attribute argument.
4115 NewCall = Builder.CreateCall(
4116 NewFn,
4117 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
4118 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
4119 CI->eraseFromParent();
4120 return;
4121
4122 case Intrinsic::x86_xop_vfrcz_ss:
4123 case Intrinsic::x86_xop_vfrcz_sd:
4124 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
4125 break;
4126
4127 case Intrinsic::x86_xop_vpermil2pd:
4128 case Intrinsic::x86_xop_vpermil2ps:
4129 case Intrinsic::x86_xop_vpermil2pd_256:
4130 case Intrinsic::x86_xop_vpermil2ps_256: {
4131 SmallVector<Value *, 4> Args(CI->args());
4132 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
4133 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
4134 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
4135 NewCall = Builder.CreateCall(NewFn, Args);
4136 break;
4137 }
4138
4139 case Intrinsic::x86_sse41_ptestc:
4140 case Intrinsic::x86_sse41_ptestz:
4141 case Intrinsic::x86_sse41_ptestnzc: {
4142 // The arguments for these intrinsics used to be v4f32, and changed
4143 // to v2i64. This is purely a nop, since those are bitwise intrinsics.
4144 // So, the only thing required is a bitcast for both arguments.
4145 // First, check the arguments have the old type.
4146 Value *Arg0 = CI->getArgOperand(0);
4147 if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
4148 return;
4149
4150 // Old intrinsic, add bitcasts
4151 Value *Arg1 = CI->getArgOperand(1);
4152
4153 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
4154
4155 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
4156 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
4157
4158 NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
4159 break;
4160 }
4161
4162 case Intrinsic::x86_rdtscp: {
4163 // This used to take 1 argument. If we have no arguments, it is already
4164 // upgraded.
4165 if (CI->getNumOperands() == 0)
4166 return;
4167
4168 NewCall = Builder.CreateCall(NewFn);
4169 // Extract the second result and store it.
4170 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4171 // Cast the pointer to the right type.
4172 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
4173 llvm::PointerType::getUnqual(Data->getType()));
4174 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4175 // Replace the original call result with the first result of the new call.
4176 Value *TSC = Builder.CreateExtractValue(NewCall, 0);
4177
4178 NewCall->takeName(CI);
4179 CI->replaceAllUsesWith(TSC);
4180 CI->eraseFromParent();
4181 return;
4182 }
4183
4184 case Intrinsic::x86_sse41_insertps:
4185 case Intrinsic::x86_sse41_dppd:
4186 case Intrinsic::x86_sse41_dpps:
4187 case Intrinsic::x86_sse41_mpsadbw:
4188 case Intrinsic::x86_avx_dp_ps_256:
4189 case Intrinsic::x86_avx2_mpsadbw: {
4190 // Need to truncate the last argument from i32 to i8 -- this argument models
4191 // an inherently 8-bit immediate operand to these x86 instructions.
4192 SmallVector<Value *, 4> Args(CI->args());
4193
4194 // Replace the last argument with a trunc.
4195 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
4196 NewCall = Builder.CreateCall(NewFn, Args);
4197 break;
4198 }
4199
4200 case Intrinsic::x86_avx512_mask_cmp_pd_128:
4201 case Intrinsic::x86_avx512_mask_cmp_pd_256:
4202 case Intrinsic::x86_avx512_mask_cmp_pd_512:
4203 case Intrinsic::x86_avx512_mask_cmp_ps_128:
4204 case Intrinsic::x86_avx512_mask_cmp_ps_256:
4205 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
4206 SmallVector<Value *, 4> Args(CI->args());
4207 unsigned NumElts =
4208 cast<FixedVectorType>(Args[0]->getType())->getNumElements();
4209 Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
4210
4211 NewCall = Builder.CreateCall(NewFn, Args);
4212 Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
4213
4214 NewCall->takeName(CI);
4215 CI->replaceAllUsesWith(Res);
4216 CI->eraseFromParent();
4217 return;
4218 }
4219
4220 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128:
4221 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256:
4222 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512:
4223 case Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128:
4224 case Intrinsic::x86_avx512bf16_cvtneps2bf16_256:
4225 case Intrinsic::x86_avx512bf16_cvtneps2bf16_512: {
4226 SmallVector<Value *, 4> Args(CI->args());
4227 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4228 if (NewFn->getIntrinsicID() ==
4229 Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
4230 Args[1] = Builder.CreateBitCast(
4231 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4232
4233 NewCall = Builder.CreateCall(NewFn, Args);
4234 Value *Res = Builder.CreateBitCast(
4235 NewCall, FixedVectorType::get(Builder.getInt16Ty(), NumElts));
4236
4237 NewCall->takeName(CI);
4238 CI->replaceAllUsesWith(Res);
4239 CI->eraseFromParent();
4240 return;
4241 }
4242 case Intrinsic::x86_avx512bf16_dpbf16ps_128:
4243 case Intrinsic::x86_avx512bf16_dpbf16ps_256:
4244 case Intrinsic::x86_avx512bf16_dpbf16ps_512:{
4245 SmallVector<Value *, 4> Args(CI->args());
4246 unsigned NumElts =
4247 cast<FixedVectorType>(CI->getType())->getNumElements() * 2;
4248 Args[1] = Builder.CreateBitCast(
4249 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4250 Args[2] = Builder.CreateBitCast(
4251 Args[2], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4252
4253 NewCall = Builder.CreateCall(NewFn, Args);
4254 break;
4255 }
4256
4257 case Intrinsic::thread_pointer: {
4258 NewCall = Builder.CreateCall(NewFn, {});
4259 break;
4260 }
4261
4262 case Intrinsic::invariant_start:
4263 case Intrinsic::invariant_end: {
4264 SmallVector<Value *, 4> Args(CI->args());
4265 NewCall = Builder.CreateCall(NewFn, Args);
4266 break;
4267 }
4268 case Intrinsic::masked_load:
4269 case Intrinsic::masked_store:
4270 case Intrinsic::masked_gather:
4271 case Intrinsic::masked_scatter: {
4272 SmallVector<Value *, 4> Args(CI->args());
4273 NewCall = Builder.CreateCall(NewFn, Args);
4274 NewCall->copyMetadata(*CI);
4275 break;
4276 }
4277
4278 case Intrinsic::memcpy:
4279 case Intrinsic::memmove:
4280 case Intrinsic::memset: {
4281 // We have to make sure that the call signature is what we're expecting.
4282 // We only want to change the old signatures by removing the alignment arg:
4283 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
4284 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
4285 // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
4286 // -> @llvm.memset...(i8*, i8, i[32|64], i1)
4287 // Note: i8*'s in the above can be any pointer type
4288 if (CI->arg_size() != 5) {
4289 DefaultCase();
4290 return;
4291 }
4292 // Remove alignment argument (3), and add alignment attributes to the
4293 // dest/src pointers.
4294 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
4295 CI->getArgOperand(2), CI->getArgOperand(4)};
4296 NewCall = Builder.CreateCall(NewFn, Args);
4297 AttributeList OldAttrs = CI->getAttributes();
4298 AttributeList NewAttrs = AttributeList::get(
4299 C, OldAttrs.getFnAttrs(), OldAttrs.getRetAttrs(),
4300 {OldAttrs.getParamAttrs(0), OldAttrs.getParamAttrs(1),
4301 OldAttrs.getParamAttrs(2), OldAttrs.getParamAttrs(4)});
4302 NewCall->setAttributes(NewAttrs);
4303 auto *MemCI = cast<MemIntrinsic>(NewCall);
4304 // All mem intrinsics support dest alignment.
4305 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
4306 MemCI->setDestAlignment(Align->getMaybeAlignValue());
4307 // Memcpy/Memmove also support source alignment.
4308 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
4309 MTI->setSourceAlignment(Align->getMaybeAlignValue());
4310 break;
4311 }
4312 }
4313 assert(NewCall && "Should have either set this variable or returned through "(static_cast <bool> (NewCall && "Should have either set this variable or returned through "
"the default case") ? void (0) : __assert_fail ("NewCall && \"Should have either set this variable or returned through \" \"the default case\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4314, __extension__ __PRETTY_FUNCTION__
))
4314 "the default case")(static_cast <bool> (NewCall && "Should have either set this variable or returned through "
"the default case") ? void (0) : __assert_fail ("NewCall && \"Should have either set this variable or returned through \" \"the default case\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4314, __extension__ __PRETTY_FUNCTION__
))
;
4315 NewCall->takeName(CI);
4316 CI->replaceAllUsesWith(NewCall);
4317 CI->eraseFromParent();
4318}
4319
4320void llvm::UpgradeCallsToIntrinsic(Function *F) {
4321 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.")(static_cast <bool> (F && "Illegal attempt to upgrade a non-existent intrinsic."
) ? void (0) : __assert_fail ("F && \"Illegal attempt to upgrade a non-existent intrinsic.\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4321, __extension__ __PRETTY_FUNCTION__
))
;
4322
4323 // Check if this function should be upgraded and get the replacement function
4324 // if there is one.
4325 Function *NewFn;
4326 if (UpgradeIntrinsicFunction(F, NewFn)) {
4327 // Replace all users of the old function with the new function or new
4328 // instructions. This is not a range loop because the call is deleted.
4329 for (User *U : make_early_inc_range(F->users()))
4330 if (CallBase *CB = dyn_cast<CallBase>(U))
4331 UpgradeIntrinsicCall(CB, NewFn);
4332
4333 // Remove old function, no longer used, from the module.
4334 F->eraseFromParent();
4335 }
4336}
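// A minimal caller sketch (assumed usage, mirroring what a bitcode loader
// would do): run the upgrader over every function of a freshly parsed
// module; UpgradeCallsToIntrinsic rewrites the calls and erases the old
// declaration itself, hence the early-inc iteration.
//   for (Function &Fn : llvm::make_early_inc_range(M.functions()))
//     llvm::UpgradeCallsToIntrinsic(&Fn);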
4337
4338MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
4339 // Check if the tag uses struct-path aware TBAA format.
4340 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
4341 return &MD;
4342
4343 auto &Context = MD.getContext();
4344 if (MD.getNumOperands() == 3) {
4345 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
4346 MDNode *ScalarType = MDNode::get(Context, Elts);
4347 // Create a MDNode <ScalarType, ScalarType, offset 0, const>
4348 Metadata *Elts2[] = {ScalarType, ScalarType,
4349 ConstantAsMetadata::get(
4350 Constant::getNullValue(Type::getInt64Ty(Context))),
4351 MD.getOperand(2)};
4352 return MDNode::get(Context, Elts2);
4353 }
4354 // Create a MDNode <MD, MD, offset 0>
4355 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
4356 Type::getInt64Ty(Context)))};
4357 return MDNode::get(Context, Elts);
4358}
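// A worked example of the rewrite above (node numbers are illustrative):
// a two-operand scalar TBAA tag
//   !1 = !{!"int", !0}
// is wrapped into the struct-path form <MD, MD, offset 0>:
//   !2 = !{!1, !1, i64 0}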
4359
4360Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
4361 Instruction *&Temp) {
4362 if (Opc != Instruction::BitCast)
4363 return nullptr;
4364
4365 Temp = nullptr;
4366 Type *SrcTy = V->getType();
4367 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4368 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4369 LLVMContext &Context = V->getContext();
4370
4371 // We have no information about target data layout, so we assume that
4372 // the maximum pointer size is 64bit.
4373 Type *MidTy = Type::getInt64Ty(Context);
4374 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
4375
4376 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
4377 }
4378
4379 return nullptr;
4380}
4381
4382Constant *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
4383 if (Opc != Instruction::BitCast)
4384 return nullptr;
4385
4386 Type *SrcTy = C->getType();
4387 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4388 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4389 LLVMContext &Context = C->getContext();
4390
4391 // We have no information about target data layout, so we assume that
4392 // the maximum pointer size is 64bit.
4393 Type *MidTy = Type::getInt64Ty(Context);
4394
4395 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
4396 DestTy);
4397 }
4398
4399 return nullptr;
4400}
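// Illustration of the cast split above (the IR is a sketch, not analyzer
// output): a bitcast between pointers in different address spaces,
//   %q = bitcast i8* %p to i8 addrspace(1)*
// is legalized as a ptrtoint/inttoptr pair through i64:
//   %t = ptrtoint i8* %p to i64
//   %q = inttoptr i64 %t to i8 addrspace(1)*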
4401
4402/// Check the debug info version number; if it is out-of-date, drop the debug
4403/// info. Return true if the module is modified.
4404bool llvm::UpgradeDebugInfo(Module &M) {
4405 unsigned Version = getDebugMetadataVersionFromModule(M);
4406 if (Version == DEBUG_METADATA_VERSION) {
4407 bool BrokenDebugInfo = false;
4408 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
4409 report_fatal_error("Broken module found, compilation aborted!");
4410 if (!BrokenDebugInfo)
4411 // Everything is ok.
4412 return false;
4413 else {
4414 // Diagnose malformed debug info.
4415 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
4416 M.getContext().diagnose(Diag);
4417 }
4418 }
4419 bool Modified = StripDebugInfo(M);
4420 if (Modified && Version != DEBUG_METADATA_VERSION) {
4421 // Diagnose a version mismatch.
4422 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
4423 M.getContext().diagnose(DiagVersion);
4424 }
4425 return Modified;
4426}
4427
4428/// This checks for the objc retain/release marker which should be upgraded.
4429/// It returns true if the module is modified.
4430static bool UpgradeRetainReleaseMarker(Module &M) {
4431 bool Changed = false;
4432 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
4433 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
4434 if (ModRetainReleaseMarker) {
4435 MDNode *Op = ModRetainReleaseMarker->getOperand(0);
4436 if (Op) {
4437 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
4438 if (ID) {
4439 SmallVector<StringRef, 4> ValueComp;
4440 ID->getString().split(ValueComp, "#");
4441 if (ValueComp.size() == 2) {
4442 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
4443 ID = MDString::get(M.getContext(), NewValue);
4444 }
4445 M.addModuleFlag(Module::Error, MarkerKey, ID);
4446 M.eraseNamedMetadata(ModRetainReleaseMarker);
4447 Changed = true;
4448 }
4449 }
4450 }
4451 return Changed;
4452}
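// Example of the separator rewrite above (the marker string is
// hypothetical): a module-level value such as
//   "mov fp, fp#marker for objc_retainAutoreleaseReturnValue"
// is split on '#' and rejoined with ';' before being re-added as a
// module flag:
//   "mov fp, fp;marker for objc_retainAutoreleaseReturnValue"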
4453
4454void llvm::UpgradeARCRuntime(Module &M) {
4455 // This lambda converts normal function calls to ARC runtime functions to
4456 // intrinsic calls.
4457 auto UpgradeToIntrinsic = [&](const char *OldFunc,
4458 llvm::Intrinsic::ID IntrinsicFunc) {
4459 Function *Fn = M.getFunction(OldFunc);
4460
4461 if (!Fn)
4462 return;
4463
4464 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
4465
4466 for (User *U : make_early_inc_range(Fn->users())) {
4467 CallInst *CI = dyn_cast<CallInst>(U);
4468 if (!CI || CI->getCalledFunction() != Fn)
4469 continue;
4470
4471 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
4472 FunctionType *NewFuncTy = NewFn->getFunctionType();
4473 SmallVector<Value *, 2> Args;
4474
4475 // Don't upgrade the intrinsic if it's not valid to bitcast the return
4476 // value to the return type of the old function.
4477 if (NewFuncTy->getReturnType() != CI->getType() &&
4478 !CastInst::castIsValid(Instruction::BitCast, CI,
4479 NewFuncTy->getReturnType()))
4480 continue;
4481
4482 bool InvalidCast = false;
4483
4484 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
4485 Value *Arg = CI->getArgOperand(I);
4486
4487 // Bitcast argument to the parameter type of the new function if it's
4488 // not a variadic argument.
4489 if (I < NewFuncTy->getNumParams()) {
4490 // Don't upgrade the intrinsic if it's not valid to bitcast the argument
4491 // to the parameter type of the new function.
4492 if (!CastInst::castIsValid(Instruction::BitCast, Arg,
4493 NewFuncTy->getParamType(I))) {
4494 InvalidCast = true;
4495 break;
4496 }
4497 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
4498 }
4499 Args.push_back(Arg);
4500 }
4501
4502 if (InvalidCast)
4503 continue;
4504
4505 // Create a call instruction that calls the new function.
4506 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
4507 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
4508 NewCall->takeName(CI);
4509
4510 // Bitcast the return value back to the type of the old call.
4511 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
4512
4513 if (!CI->use_empty())
4514 CI->replaceAllUsesWith(NewRetVal);
4515 CI->eraseFromParent();
4516 }
4517
4518 if (Fn->use_empty())
4519 Fn->eraseFromParent();
4520 };
4521
4522 // Unconditionally convert a call to "clang.arc.use" to a call to
4523 // "llvm.objc.clang.arc.use".
4524 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
4525
4526 // Upgrade the retain/release marker. If there is no need to upgrade
4527 // the marker, that means either the module is already new enough to contain
4528 // new intrinsics or it is not ARC. There is no need to upgrade runtime calls.
4529 if (!UpgradeRetainReleaseMarker(M))
4530 return;
4531
4532 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
4533 {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
4534 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
4535 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
4536 {"objc_autoreleaseReturnValue",
4537 llvm::Intrinsic::objc_autoreleaseReturnValue},
4538 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
4539 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
4540 {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
4541 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
4542 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
4543 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
4544 {"objc_release", llvm::Intrinsic::objc_release},
4545 {"objc_retain", llvm::Intrinsic::objc_retain},
4546 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
4547 {"objc_retainAutoreleaseReturnValue",
4548 llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
4549 {"objc_retainAutoreleasedReturnValue",
4550 llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
4551 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
4552 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
4553 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
4554 {"objc_unsafeClaimAutoreleasedReturnValue",
4555 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
4556 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
4557 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
4558 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
4559 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
4560 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
4561 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
4562 {"objc_arc_annotation_topdown_bbstart",
4563 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
4564 {"objc_arc_annotation_topdown_bbend",
4565 llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
4566 {"objc_arc_annotation_bottomup_bbstart",
4567 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
4568 {"objc_arc_annotation_bottomup_bbend",
4569 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
4570
4571 for (auto &I : RuntimeFuncs)
4572 UpgradeToIntrinsic(I.first, I.second);
4573}
4574
4575bool llvm::UpgradeModuleFlags(Module &M) {
4576 NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
4577 if (!ModFlags)
4578 return false;
4579
4580 bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
4581 bool HasSwiftVersionFlag = false;
4582 uint8_t SwiftMajorVersion, SwiftMinorVersion;
4583 uint32_t SwiftABIVersion;
4584 auto Int8Ty = Type::getInt8Ty(M.getContext());
4585 auto Int32Ty = Type::getInt32Ty(M.getContext());
4586
4587 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
4588 MDNode *Op = ModFlags->getOperand(I);
4589 if (Op->getNumOperands() != 3)
4590 continue;
4591 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
4592 if (!ID)
4593 continue;
4594 auto SetBehavior = [&](Module::ModFlagBehavior B) {
4595 Metadata *Ops[3] = {ConstantAsMetadata::get(ConstantInt::get(
4596 Type::getInt32Ty(M.getContext()), B)),
4597 MDString::get(M.getContext(), ID->getString()),
4598 Op->getOperand(2)};
4599 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4600 Changed = true;
4601 };
4602
4603 if (ID->getString() == "Objective-C Image Info Version")
4604 HasObjCFlag = true;
4605 if (ID->getString() == "Objective-C Class Properties")
4606 HasClassProperties = true;
4607 // Upgrade PIC from Error/Max to Min.
4608 if (ID->getString() == "PIC Level") {
4609 if (auto *Behavior =
4610 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4611 uint64_t V = Behavior->getLimitedValue();
4612 if (V == Module::Error || V == Module::Max)
4613 SetBehavior(Module::Min);
4614 }
4615 }
4616 // Upgrade "PIE Level" from Error to Max.
4617 if (ID->getString() == "PIE Level")
4618 if (auto *Behavior =
4619 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)))
4620 if (Behavior->getLimitedValue() == Module::Error)
4621 SetBehavior(Module::Max);
4622
4623 // Upgrade branch protection and return address signing module flags. The
4624 // module flag behavior for these fields was Error and is now Min.
4625 if (ID->getString() == "branch-target-enforcement" ||
4626 ID->getString().startswith("sign-return-address")) {
4627 if (auto *Behavior =
4628 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4629 if (Behavior->getLimitedValue() == Module::Error) {
4630 Type *Int32Ty = Type::getInt32Ty(M.getContext());
4631 Metadata *Ops[3] = {
4632 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Min)),
4633 Op->getOperand(1), Op->getOperand(2)};
4634 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4635 Changed = true;
4636 }
4637 }
4638 }
4639
4640 // Upgrade the Objective-C Image Info Section. Remove the whitespace in the
4641 // section name so that llvm-lto will not complain about mismatching
4642 // module flags that are functionally the same.
4643 if (ID->getString() == "Objective-C Image Info Section") {
4644 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
4645 SmallVector<StringRef, 4> ValueComp;
4646 Value->getString().split(ValueComp, " ");
4647 if (ValueComp.size() != 1) {
4648 std::string NewValue;
4649 for (auto &S : ValueComp)
4650 NewValue += S.str();
4651 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
4652 MDString::get(M.getContext(), NewValue)};
4653 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4654 Changed = true;
4655 }
4656 }
4657 }
4658
4659 // The IR upgrader turns an i32 "Objective-C Garbage Collection" flag into
4660 // an i8 value. If the higher bits are set, it adds a new module flag for
4661 // Swift info.
4661 if (ID->getString() == "Objective-C Garbage Collection") {
4662 auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
4663 if (Md) {
4664 assert(Md->getValue() && "Expected non-empty metadata")(static_cast <bool> (Md->getValue() && "Expected non-empty metadata"
) ? void (0) : __assert_fail ("Md->getValue() && \"Expected non-empty metadata\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4664, __extension__ __PRETTY_FUNCTION__
))
;
4665 auto Type = Md->getValue()->getType();
4666 if (Type == Int8Ty)
4667 continue;
4668 unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
4669 if ((Val & 0xff) != Val) {
4670 HasSwiftVersionFlag = true;
4671 SwiftABIVersion = (Val & 0xff00) >> 8;
4672 SwiftMajorVersion = (Val & 0xff000000) >> 24;
4673 SwiftMinorVersion = (Val & 0xff0000) >> 16;
4674 }
4675 Metadata *Ops[3] = {
4676 ConstantAsMetadata::get(ConstantInt::get(Int32Ty,Module::Error)),
4677 Op->getOperand(1),
4678 ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
4679 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4680 Changed = true;
4681 }
4682 }
4683 }
4684
4685 // "Objective-C Class Properties" is recently added for Objective-C. We
4686 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
4687 // flag of value 0, so we can correclty downgrade this flag when trying to
4688 // link an ObjC bitcode without this module flag with an ObjC bitcode with
4689 // this module flag.
4690 if (HasObjCFlag && !HasClassProperties) {
4691 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
4692 (uint32_t)0);
4693 Changed = true;
4694 }
4695
4696 if (HasSwiftVersionFlag) {
4697 M.addModuleFlag(Module::Error, "Swift ABI Version",
4698 SwiftABIVersion);
4699 M.addModuleFlag(Module::Error, "Swift Major Version",
4700 ConstantInt::get(Int8Ty, SwiftMajorVersion));
4701 M.addModuleFlag(Module::Error, "Swift Minor Version",
4702 ConstantInt::get(Int8Ty, SwiftMinorVersion));
4703 Changed = true;
4704 }
4705
4706 return Changed;
4707}
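// Worked example of the bit unpacking above (the value is hypothetical):
// for Val = 0x01020705 the flag itself is downgraded to i8 0x05, and the
// high bits become the separate Swift flags:
//   SwiftABIVersion   = (Val & 0xff00)     >> 8  = 0x07
//   SwiftMajorVersion = (Val & 0xff000000) >> 24 = 0x01
//   SwiftMinorVersion = (Val & 0xff0000)   >> 16 = 0x02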
4708
4709void llvm::UpgradeSectionAttributes(Module &M) {
4710 auto TrimSpaces = [](StringRef Section) -> std::string {
4711 SmallVector<StringRef, 5> Components;
4712 Section.split(Components, ',');
4713
4714 SmallString<32> Buffer;
4715 raw_svector_ostream OS(Buffer);
4716
4717 for (auto Component : Components)
4718 OS << ',' << Component.trim();
4719
4720 return std::string(OS.str().substr(1));
4721 };
4722
4723 for (auto &GV : M.globals()) {
4724 if (!GV.hasSection())
4725 continue;
4726
4727 StringRef Section = GV.getSection();
4728
4729 if (!Section.startswith("__DATA, __objc_catlist"))
4730 continue;
4731
4732 // __DATA, __objc_catlist, regular, no_dead_strip
4733 // __DATA,__objc_catlist,regular,no_dead_strip
4734 GV.setSection(TrimSpaces(Section));
4735 }
4736}
4737
4738namespace {
4739// Prior to LLVM 10.0, the strictfp attribute could be used on individual
4740// callsites within a function that did not also have the strictfp attribute.
4741// Since 10.0, if strict FP semantics are needed within a function, the
4742// function must have the strictfp attribute and all calls within the function
4743// must also have the strictfp attribute. This latter restriction is
4744// necessary to prevent unwanted libcall simplification when a function is
4745// being cloned (such as for inlining).
4746//
4747// The "dangling" strictfp attribute usage was only used to prevent constant
4748// folding and other libcall simplification. The nobuiltin attribute on the
4749// callsite has the same effect.
4750struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
4751 StrictFPUpgradeVisitor() = default;
4752
4753 void visitCallBase(CallBase &Call) {
4754 if (!Call.isStrictFP())
4755 return;
4756 if (isa<ConstrainedFPIntrinsic>(&Call))
4757 return;
4758 // If we get here, the caller doesn't have the strictfp attribute
4759 // but this callsite does. Replace the strictfp attribute with nobuiltin.
4760 Call.removeFnAttr(Attribute::StrictFP);
4761 Call.addFnAttr(Attribute::NoBuiltin);
4762 }
4763};
4764} // namespace
4765
4766void llvm::UpgradeFunctionAttributes(Function &F) {
4767 // If a function definition doesn't have the strictfp attribute,
4768 // convert any callsite strictfp attributes to nobuiltin.
4769 if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
4770 StrictFPUpgradeVisitor SFPV;
4771 SFPV.visit(F);
4772 }
4773
4774 // Remove all incompatible attributes from the function.
4775 F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
4776 for (auto &Arg : F.args())
4777 Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
4778}
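// Effect of the visitor above (an IR sketch; the callee name is assumed):
// inside a caller that itself lacks the strictfp attribute, a callsite
//   %r = call double @sin(double %x) #0   ; #0 = { strictfp }
// is rewritten so constant folding and libcall simplification stay blocked:
//   %r = call double @sin(double %x) #1   ; #1 = { nobuiltin }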
4779
4780static bool isOldLoopArgument(Metadata *MD) {
4781 auto *T = dyn_cast_or_null<MDTuple>(MD);
4782 if (!T)
4783 return false;
4784 if (T->getNumOperands() < 1)
4785 return false;
4786 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
4787 if (!S)
4788 return false;
4789 return S->getString().startswith("llvm.vectorizer.");
4790}
4791
4792static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
4793 StringRef OldPrefix = "llvm.vectorizer.";
4794 assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
4795
4796 if (OldTag == "llvm.vectorizer.unroll")
4797 return MDString::get(C, "llvm.loop.interleave.count");
4798
4799 return MDString::get(
4800 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
4801 .str());
4802}
4803
4804static Metadata *upgradeLoopArgument(Metadata *MD) {
4805 auto *T = dyn_cast_or_null<MDTuple>(MD);
4806 if (!T)
4807 return MD;
4808 if (T->getNumOperands() < 1)
4809 return MD;
4810 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
4811 if (!OldTag)
4812 return MD;
4813 if (!OldTag->getString().startswith("llvm.vectorizer."))
4814 return MD;
4815
4816 // This has an old tag. Upgrade it.
4817 SmallVector<Metadata *, 8> Ops;
4818 Ops.reserve(T->getNumOperands());
4819 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
4820 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
4821 Ops.push_back(T->getOperand(I));
4822
4823 return MDTuple::get(T->getContext(), Ops);
4824}
4825
4826MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
4827 auto *T = dyn_cast<MDTuple>(&N);
4828 if (!T)
4829 return &N;
4830
4831 if (none_of(T->operands(), isOldLoopArgument))
4832 return &N;
4833
4834 SmallVector<Metadata *, 8> Ops;
4835 Ops.reserve(T->getNumOperands());
4836 for (Metadata *MD : T->operands())
4837 Ops.push_back(upgradeLoopArgument(MD));
4838
4839 return MDTuple::get(T->getContext(), Ops);
4840}
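// Example of the tag upgrade above (metadata numbering is illustrative):
//   !0 = !{!0, !1, !2}
//   !1 = !{!"llvm.vectorizer.width", i32 4}
//   !2 = !{!"llvm.vectorizer.unroll", i32 2}
// becomes
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
//   !2 = !{!"llvm.loop.interleave.count", i32 2}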
4841
4842std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
4843 Triple T(TT);
4844 // For AMDGPU we upgrade older DataLayouts to include the default globals
4845 // address space of 1.
4846 if (T.isAMDGPU() && !DL.contains("-G") && !DL.startswith("G")) {
4847 return DL.empty() ? std::string("G1") : (DL + "-G1").str();
4848 }
4849
4850 if (T.isRISCV64()) {
4851 // Make i32 a native type for 64-bit RISC-V.
4852 auto I = DL.find("-n64-");
4853 if (I != StringRef::npos)
4854 return (DL.take_front(I) + "-n32:64-" + DL.drop_front(I + 5)).str();
4855 return DL.str();
4856 }
4857
4858 std::string Res = DL.str();
4859 if (!T.isX86())
4860 return Res;
4861
4862 // If the datalayout matches the expected format, add pointer size address
4863 // spaces to the datalayout.
4864 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
4865 if (!DL.contains(AddrSpaces)) {
4866 SmallVector<StringRef, 4> Groups;
4867 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
4868 if (R.match(DL, &Groups))
4869 Res = (Groups[1] + AddrSpaces + Groups[3]).str();
4870 }
4871
4872 // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
4873 // Raising the alignment is safe because Clang did not produce f80 values in
4874 // the MSVC environment before this upgrade was added.
4875 if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
4876 StringRef Ref = Res;
4877 auto I = Ref.find("-f80:32-");
4878 if (I != StringRef::npos)
4879 Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
4880 }
4881
4882 return Res;
4883}
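// A worked example for the x86 path above (the input string is assumed,
// not taken from the report): a layout lacking the pointer address spaces,
//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
// matches the regex and gains them right after the mangling prefix:
//   "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"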
4884
4885void llvm::UpgradeAttributes(AttrBuilder &B) {
4886 StringRef FramePointer;
4887 Attribute A = B.getAttribute("no-frame-pointer-elim");
4888 if (A.isValid()) {
4889 // The value can be "true" or "false".
4890 FramePointer = A.getValueAsString() == "true" ? "all" : "none";
4891 B.removeAttribute("no-frame-pointer-elim");
4892 }
4893 if (B.contains("no-frame-pointer-elim-non-leaf")) {
4894 // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
4895 if (FramePointer != "all")
4896 FramePointer = "non-leaf";
4897 B.removeAttribute("no-frame-pointer-elim-non-leaf");
4898 }
4899 if (!FramePointer.empty())
4900 B.addAttribute("frame-pointer", FramePointer);
4901
4902 A = B.getAttribute("null-pointer-is-valid");
4903 if (A.isValid()) {
4904 // The value can be "true" or "false".
4905 bool NullPointerIsValid = A.getValueAsString() == "true";
4906 B.removeAttribute("null-pointer-is-valid");
4907 if (NullPointerIsValid)
4908 B.addAttribute(Attribute::NullPointerIsValid);
4909 }
4910}
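// Summary of the mapping realized above (a sketch, not analyzer output):
//   "no-frame-pointer-elim"="true"          -> "frame-pointer"="all"
//   "no-frame-pointer-elim"="false"         -> "frame-pointer"="none"
//   "no-frame-pointer-elim-non-leaf" alone  -> "frame-pointer"="non-leaf"
//   "null-pointer-is-valid"="true"          -> null_pointer_is_valid
// ("non-leaf" also wins over "none" when both old attributes are present.)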
4911
4912void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
4913
4914 // clang.arc.attachedcall bundles are now required to have an operand.
4915 // If they don't, it's okay to drop them entirely: when there is an operand,
4916 // the "attachedcall" is meaningful and required, but without an operand,
4917 // it's just a marker NOP. Dropping it merely prevents an optimization.
4918 erase_if(Bundles, [&](OperandBundleDef &OBD) {
4919 return OBD.getTag() == "clang.arc.attachedcall" &&
4920 OBD.inputs().empty();
4921 });
4922}
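// Illustration (the calls shown are hypothetical): an operand-less bundle
//   call void @f() [ "clang.arc.attachedcall"() ]
// is dropped entirely, while a bundle that names its runtime function,
//   call void @f() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
// is kept untouched.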