Bug Summary

File:build/source/llvm/lib/IR/AutoUpgrade.cpp
Warning:line 4066, column 26
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AutoUpgrade.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-17/lib/clang/17 -I lib/IR -I /build/source/llvm/lib/IR -I include -I /build/source/llvm/include -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -source-date-epoch 1675682001 -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-02-06-130241-16458-1 -x c++ /build/source/llvm/lib/IR/AutoUpgrade.cpp
1//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the auto-upgrade helper functions.
10// This is where deprecated IR intrinsics and other IR features are updated to
11// current specifications.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/IR/AutoUpgrade.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/ADT/Triple.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/DebugInfo.h"
20#include "llvm/IR/DiagnosticInfo.h"
21#include "llvm/IR/Function.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/InstVisitor.h"
24#include "llvm/IR/Instruction.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/IR/IntrinsicsAArch64.h"
28#include "llvm/IR/IntrinsicsARM.h"
29#include "llvm/IR/IntrinsicsX86.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/Module.h"
32#include "llvm/IR/Verifier.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/Regex.h"
35#include <cstring>
36using namespace llvm;
37
38static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
39
40// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
41// changed their type from v4f32 to v2i64.
42static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
43 Function *&NewFn) {
44 // Check whether this is an old version of the function, which received
45 // v4f32 arguments.
46 Type *Arg0Type = F->getFunctionType()->getParamType(0);
47 if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
48 return false;
49
50 // Yes, it's old, replace it with new version.
51 rename(F);
52 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
53 return true;
54}
55
56// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
57// arguments have changed their type from i32 to i8.
58static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
59 Function *&NewFn) {
60 // Check that the last argument is an i32.
61 Type *LastArgType = F->getFunctionType()->getParamType(
62 F->getFunctionType()->getNumParams() - 1);
63 if (!LastArgType->isIntegerTy(32))
64 return false;
65
66 // Move this function aside and map down.
67 rename(F);
68 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
69 return true;
70}
71
72// Upgrade the declaration of fp compare intrinsics that change return type
73// from scalar to vXi1 mask.
74static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
75 Function *&NewFn) {
76 // Check if the return type is a vector.
77 if (F->getReturnType()->isVectorTy())
78 return false;
79
80 rename(F);
81 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
82 return true;
83}
84
85static bool UpgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
86 Function *&NewFn) {
87 if (F->getReturnType()->getScalarType()->isBFloatTy())
88 return false;
89
90 rename(F);
91 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
92 return true;
93}
94
95static bool UpgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
96 Function *&NewFn) {
97 if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
98 return false;
99
100 rename(F);
101 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
102 return true;
103}
104
105static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
106 // All of the intrinsics matches below should be marked with which llvm
107 // version started autoupgrading them. At some point in the future we would
108 // like to use this information to remove upgrade code for some older
109 // intrinsics. It is currently undecided how we will determine that future
110 // point.
111 if (Name == "addcarryx.u32" || // Added in 8.0
112 Name == "addcarryx.u64" || // Added in 8.0
113 Name == "addcarry.u32" || // Added in 8.0
114 Name == "addcarry.u64" || // Added in 8.0
115 Name == "subborrow.u32" || // Added in 8.0
116 Name == "subborrow.u64" || // Added in 8.0
117 Name.startswith("sse2.padds.") || // Added in 8.0
118 Name.startswith("sse2.psubs.") || // Added in 8.0
119 Name.startswith("sse2.paddus.") || // Added in 8.0
120 Name.startswith("sse2.psubus.") || // Added in 8.0
121 Name.startswith("avx2.padds.") || // Added in 8.0
122 Name.startswith("avx2.psubs.") || // Added in 8.0
123 Name.startswith("avx2.paddus.") || // Added in 8.0
124 Name.startswith("avx2.psubus.") || // Added in 8.0
125 Name.startswith("avx512.padds.") || // Added in 8.0
126 Name.startswith("avx512.psubs.") || // Added in 8.0
127 Name.startswith("avx512.mask.padds.") || // Added in 8.0
128 Name.startswith("avx512.mask.psubs.") || // Added in 8.0
129 Name.startswith("avx512.mask.paddus.") || // Added in 8.0
130 Name.startswith("avx512.mask.psubus.") || // Added in 8.0
131 Name=="ssse3.pabs.b.128" || // Added in 6.0
132 Name=="ssse3.pabs.w.128" || // Added in 6.0
133 Name=="ssse3.pabs.d.128" || // Added in 6.0
134 Name.startswith("fma4.vfmadd.s") || // Added in 7.0
135 Name.startswith("fma.vfmadd.") || // Added in 7.0
136 Name.startswith("fma.vfmsub.") || // Added in 7.0
137 Name.startswith("fma.vfmsubadd.") || // Added in 7.0
138 Name.startswith("fma.vfnmadd.") || // Added in 7.0
139 Name.startswith("fma.vfnmsub.") || // Added in 7.0
140 Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
141 Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
142 Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
143 Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
144 Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
145 Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
146 Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
147 Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
148 Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
149 Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
150 Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
151 Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
152 Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
153 Name.startswith("avx512.kunpck") || //added in 6.0
154 Name.startswith("avx2.pabs.") || // Added in 6.0
155 Name.startswith("avx512.mask.pabs.") || // Added in 6.0
156 Name.startswith("avx512.broadcastm") || // Added in 6.0
157 Name == "sse.sqrt.ss" || // Added in 7.0
158 Name == "sse2.sqrt.sd" || // Added in 7.0
159 Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
160 Name.startswith("avx.sqrt.p") || // Added in 7.0
161 Name.startswith("sse2.sqrt.p") || // Added in 7.0
162 Name.startswith("sse.sqrt.p") || // Added in 7.0
163 Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
164 Name.startswith("sse2.pcmpeq.") || // Added in 3.1
165 Name.startswith("sse2.pcmpgt.") || // Added in 3.1
166 Name.startswith("avx2.pcmpeq.") || // Added in 3.1
167 Name.startswith("avx2.pcmpgt.") || // Added in 3.1
168 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
169 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
170 Name.startswith("avx.vperm2f128.") || // Added in 6.0
171 Name == "avx2.vperm2i128" || // Added in 6.0
172 Name == "sse.add.ss" || // Added in 4.0
173 Name == "sse2.add.sd" || // Added in 4.0
174 Name == "sse.sub.ss" || // Added in 4.0
175 Name == "sse2.sub.sd" || // Added in 4.0
176 Name == "sse.mul.ss" || // Added in 4.0
177 Name == "sse2.mul.sd" || // Added in 4.0
178 Name == "sse.div.ss" || // Added in 4.0
179 Name == "sse2.div.sd" || // Added in 4.0
180 Name == "sse41.pmaxsb" || // Added in 3.9
181 Name == "sse2.pmaxs.w" || // Added in 3.9
182 Name == "sse41.pmaxsd" || // Added in 3.9
183 Name == "sse2.pmaxu.b" || // Added in 3.9
184 Name == "sse41.pmaxuw" || // Added in 3.9
185 Name == "sse41.pmaxud" || // Added in 3.9
186 Name == "sse41.pminsb" || // Added in 3.9
187 Name == "sse2.pmins.w" || // Added in 3.9
188 Name == "sse41.pminsd" || // Added in 3.9
189 Name == "sse2.pminu.b" || // Added in 3.9
190 Name == "sse41.pminuw" || // Added in 3.9
191 Name == "sse41.pminud" || // Added in 3.9
192 Name == "avx512.kand.w" || // Added in 7.0
193 Name == "avx512.kandn.w" || // Added in 7.0
194 Name == "avx512.knot.w" || // Added in 7.0
195 Name == "avx512.kor.w" || // Added in 7.0
196 Name == "avx512.kxor.w" || // Added in 7.0
197 Name == "avx512.kxnor.w" || // Added in 7.0
198 Name == "avx512.kortestc.w" || // Added in 7.0
199 Name == "avx512.kortestz.w" || // Added in 7.0
200 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
201 Name.startswith("avx2.pmax") || // Added in 3.9
202 Name.startswith("avx2.pmin") || // Added in 3.9
203 Name.startswith("avx512.mask.pmax") || // Added in 4.0
204 Name.startswith("avx512.mask.pmin") || // Added in 4.0
205 Name.startswith("avx2.vbroadcast") || // Added in 3.8
206 Name.startswith("avx2.pbroadcast") || // Added in 3.8
207 Name.startswith("avx.vpermil.") || // Added in 3.1
208 Name.startswith("sse2.pshuf") || // Added in 3.9
209 Name.startswith("avx512.pbroadcast") || // Added in 3.9
210 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
211 Name.startswith("avx512.mask.movddup") || // Added in 3.9
212 Name.startswith("avx512.mask.movshdup") || // Added in 3.9
213 Name.startswith("avx512.mask.movsldup") || // Added in 3.9
214 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
215 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
216 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
217 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
218 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
219 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
220 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
221 Name.startswith("avx512.mask.punpckl") || // Added in 3.9
222 Name.startswith("avx512.mask.punpckh") || // Added in 3.9
223 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
224 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
225 Name.startswith("avx512.mask.pand.") || // Added in 3.9
226 Name.startswith("avx512.mask.pandn.") || // Added in 3.9
227 Name.startswith("avx512.mask.por.") || // Added in 3.9
228 Name.startswith("avx512.mask.pxor.") || // Added in 3.9
229 Name.startswith("avx512.mask.and.") || // Added in 3.9
230 Name.startswith("avx512.mask.andn.") || // Added in 3.9
231 Name.startswith("avx512.mask.or.") || // Added in 3.9
232 Name.startswith("avx512.mask.xor.") || // Added in 3.9
233 Name.startswith("avx512.mask.padd.") || // Added in 4.0
234 Name.startswith("avx512.mask.psub.") || // Added in 4.0
235 Name.startswith("avx512.mask.pmull.") || // Added in 4.0
236 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
237 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
238 Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
239 Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
240 Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
241 Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
242 Name == "avx512.mask.vcvtph2ps.128" || // Added in 11.0
243 Name == "avx512.mask.vcvtph2ps.256" || // Added in 11.0
244 Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
245 Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
246 Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
247 Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
248 Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
249 Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
250 Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
251 Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
252 Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
253 Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
254 Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
255 Name == "avx512.cvtusi2sd" || // Added in 7.0
256 Name.startswith("avx512.mask.permvar.") || // Added in 7.0
257 Name == "sse2.pmulu.dq" || // Added in 7.0
258 Name == "sse41.pmuldq" || // Added in 7.0
259 Name == "avx2.pmulu.dq" || // Added in 7.0
260 Name == "avx2.pmul.dq" || // Added in 7.0
261 Name == "avx512.pmulu.dq.512" || // Added in 7.0
262 Name == "avx512.pmul.dq.512" || // Added in 7.0
263 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
264 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
265 Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
266 Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
267 Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
268 Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
269 Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
270 Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
271 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
272 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
273 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
274 Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
275 Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
276 Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
277 Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
278 Name.startswith("avx512.cmp.p") || // Added in 12.0
279 Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
280 Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
281 Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
282 Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
283 Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
284 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
285 Name.startswith("avx512.mask.psll.d") || // Added in 4.0
286 Name.startswith("avx512.mask.psll.q") || // Added in 4.0
287 Name.startswith("avx512.mask.psll.w") || // Added in 4.0
288 Name.startswith("avx512.mask.psra.d") || // Added in 4.0
289 Name.startswith("avx512.mask.psra.q") || // Added in 4.0
290 Name.startswith("avx512.mask.psra.w") || // Added in 4.0
291 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
292 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
293 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
294 Name.startswith("avx512.mask.pslli") || // Added in 4.0
295 Name.startswith("avx512.mask.psrai") || // Added in 4.0
296 Name.startswith("avx512.mask.psrli") || // Added in 4.0
297 Name.startswith("avx512.mask.psllv") || // Added in 4.0
298 Name.startswith("avx512.mask.psrav") || // Added in 4.0
299 Name.startswith("avx512.mask.psrlv") || // Added in 4.0
300 Name.startswith("sse41.pmovsx") || // Added in 3.8
301 Name.startswith("sse41.pmovzx") || // Added in 3.9
302 Name.startswith("avx2.pmovsx") || // Added in 3.9
303 Name.startswith("avx2.pmovzx") || // Added in 3.9
304 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
305 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
306 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
307 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
308 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
309 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
310 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
311 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
312 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
313 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
314 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
315 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
316 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
317 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
318 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
319 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
320 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
321 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
322 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
323 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
324 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
325 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
326 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
327 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
328 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
329 Name.startswith("avx512.vpshld.") || // Added in 8.0
330 Name.startswith("avx512.vpshrd.") || // Added in 8.0
331 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
332 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
333 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
334 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
335 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
336 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
337 Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
338 Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
339 Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
340 Name.startswith("avx512.mask.conflict.") || // Added in 9.0
341 Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
342 Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
343 Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
344 Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
345 Name == "sse.cvtsi2ss" || // Added in 7.0
346 Name == "sse.cvtsi642ss" || // Added in 7.0
347 Name == "sse2.cvtsi2sd" || // Added in 7.0
348 Name == "sse2.cvtsi642sd" || // Added in 7.0
349 Name == "sse2.cvtss2sd" || // Added in 7.0
350 Name == "sse2.cvtdq2pd" || // Added in 3.9
351 Name == "sse2.cvtdq2ps" || // Added in 7.0
352 Name == "sse2.cvtps2pd" || // Added in 3.9
353 Name == "avx.cvtdq2.pd.256" || // Added in 3.9
354 Name == "avx.cvtdq2.ps.256" || // Added in 7.0
355 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
356 Name.startswith("vcvtph2ps.") || // Added in 11.0
357 Name.startswith("avx.vinsertf128.") || // Added in 3.7
358 Name == "avx2.vinserti128" || // Added in 3.7
359 Name.startswith("avx512.mask.insert") || // Added in 4.0
360 Name.startswith("avx.vextractf128.") || // Added in 3.7
361 Name == "avx2.vextracti128" || // Added in 3.7
362 Name.startswith("avx512.mask.vextract") || // Added in 4.0
363 Name.startswith("sse4a.movnt.") || // Added in 3.9
364 Name.startswith("avx.movnt.") || // Added in 3.2
365 Name.startswith("avx512.storent.") || // Added in 3.9
366 Name == "sse41.movntdqa" || // Added in 5.0
367 Name == "avx2.movntdqa" || // Added in 5.0
368 Name == "avx512.movntdqa" || // Added in 5.0
369 Name == "sse2.storel.dq" || // Added in 3.9
370 Name.startswith("sse.storeu.") || // Added in 3.9
371 Name.startswith("sse2.storeu.") || // Added in 3.9
372 Name.startswith("avx.storeu.") || // Added in 3.9
373 Name.startswith("avx512.mask.storeu.") || // Added in 3.9
374 Name.startswith("avx512.mask.store.p") || // Added in 3.9
375 Name.startswith("avx512.mask.store.b.") || // Added in 3.9
376 Name.startswith("avx512.mask.store.w.") || // Added in 3.9
377 Name.startswith("avx512.mask.store.d.") || // Added in 3.9
378 Name.startswith("avx512.mask.store.q.") || // Added in 3.9
379 Name == "avx512.mask.store.ss" || // Added in 7.0
380 Name.startswith("avx512.mask.loadu.") || // Added in 3.9
381 Name.startswith("avx512.mask.load.") || // Added in 3.9
382 Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
383 Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
384 Name.startswith("avx512.mask.expand.b") || // Added in 9.0
385 Name.startswith("avx512.mask.expand.w") || // Added in 9.0
386 Name.startswith("avx512.mask.expand.d") || // Added in 9.0
387 Name.startswith("avx512.mask.expand.q") || // Added in 9.0
388 Name.startswith("avx512.mask.expand.p") || // Added in 9.0
389 Name.startswith("avx512.mask.compress.b") || // Added in 9.0
390 Name.startswith("avx512.mask.compress.w") || // Added in 9.0
391 Name.startswith("avx512.mask.compress.d") || // Added in 9.0
392 Name.startswith("avx512.mask.compress.q") || // Added in 9.0
393 Name.startswith("avx512.mask.compress.p") || // Added in 9.0
394 Name == "sse42.crc32.64.8" || // Added in 3.4
395 Name.startswith("avx.vbroadcast.s") || // Added in 3.5
396 Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
397 Name.startswith("avx512.mask.palignr.") || // Added in 3.9
398 Name.startswith("avx512.mask.valign.") || // Added in 4.0
399 Name.startswith("sse2.psll.dq") || // Added in 3.7
400 Name.startswith("sse2.psrl.dq") || // Added in 3.7
401 Name.startswith("avx2.psll.dq") || // Added in 3.7
402 Name.startswith("avx2.psrl.dq") || // Added in 3.7
403 Name.startswith("avx512.psll.dq") || // Added in 3.9
404 Name.startswith("avx512.psrl.dq") || // Added in 3.9
405 Name == "sse41.pblendw" || // Added in 3.7
406 Name.startswith("sse41.blendp") || // Added in 3.7
407 Name.startswith("avx.blend.p") || // Added in 3.7
408 Name == "avx2.pblendw" || // Added in 3.7
409 Name.startswith("avx2.pblendd.") || // Added in 3.7
410 Name.startswith("avx.vbroadcastf128") || // Added in 4.0
411 Name == "avx2.vbroadcasti128" || // Added in 3.7
412 Name.startswith("avx512.mask.broadcastf32x4.") || // Added in 6.0
413 Name.startswith("avx512.mask.broadcastf64x2.") || // Added in 6.0
414 Name.startswith("avx512.mask.broadcastf32x8.") || // Added in 6.0
415 Name.startswith("avx512.mask.broadcastf64x4.") || // Added in 6.0
416 Name.startswith("avx512.mask.broadcasti32x4.") || // Added in 6.0
417 Name.startswith("avx512.mask.broadcasti64x2.") || // Added in 6.0
418 Name.startswith("avx512.mask.broadcasti32x8.") || // Added in 6.0
419 Name.startswith("avx512.mask.broadcasti64x4.") || // Added in 6.0
420 Name == "xop.vpcmov" || // Added in 3.8
421 Name == "xop.vpcmov.256" || // Added in 5.0
422 Name.startswith("avx512.mask.move.s") || // Added in 4.0
423 Name.startswith("avx512.cvtmask2") || // Added in 5.0
424 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
425 Name.startswith("xop.vprot") || // Added in 8.0
426 Name.startswith("avx512.prol") || // Added in 8.0
427 Name.startswith("avx512.pror") || // Added in 8.0
428 Name.startswith("avx512.mask.prorv.") || // Added in 8.0
429 Name.startswith("avx512.mask.pror.") || // Added in 8.0
430 Name.startswith("avx512.mask.prolv.") || // Added in 8.0
431 Name.startswith("avx512.mask.prol.") || // Added in 8.0
432 Name.startswith("avx512.ptestm") || //Added in 6.0
433 Name.startswith("avx512.ptestnm") || //Added in 6.0
434 Name.startswith("avx512.mask.pavg")) // Added in 6.0
435 return true;
436
437 return false;
438}
439
440static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
441 Function *&NewFn) {
442 // Only handle intrinsics that start with "x86.".
443 if (!Name.startswith("x86."))
444 return false;
445 // Remove "x86." prefix.
446 Name = Name.substr(4);
447
448 if (ShouldUpgradeX86Intrinsic(F, Name)) {
449 NewFn = nullptr;
450 return true;
451 }
452
453 if (Name == "rdtscp") { // Added in 8.0
454 // If this intrinsic has 0 operands, it's the new version.
455 if (F->getFunctionType()->getNumParams() == 0)
456 return false;
457
458 rename(F);
459 NewFn = Intrinsic::getDeclaration(F->getParent(),
460 Intrinsic::x86_rdtscp);
461 return true;
462 }
463
464 // SSE4.1 ptest functions may have an old signature.
465 if (Name.startswith("sse41.ptest")) { // Added in 3.2
466 if (Name.substr(11) == "c")
467 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
468 if (Name.substr(11) == "z")
469 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
470 if (Name.substr(11) == "nzc")
471 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
472 }
473 // Several blend and other instructions with masks used the wrong number of
474 // bits.
475 if (Name == "sse41.insertps") // Added in 3.6
476 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
477 NewFn);
478 if (Name == "sse41.dppd") // Added in 3.6
479 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
480 NewFn);
481 if (Name == "sse41.dpps") // Added in 3.6
482 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
483 NewFn);
484 if (Name == "sse41.mpsadbw") // Added in 3.6
485 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
486 NewFn);
487 if (Name == "avx.dp.ps.256") // Added in 3.6
488 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
489 NewFn);
490 if (Name == "avx2.mpsadbw") // Added in 3.6
491 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
492 NewFn);
493 if (Name == "avx512.mask.cmp.pd.128") // Added in 7.0
494 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_128,
495 NewFn);
496 if (Name == "avx512.mask.cmp.pd.256") // Added in 7.0
497 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_256,
498 NewFn);
499 if (Name == "avx512.mask.cmp.pd.512") // Added in 7.0
500 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_512,
501 NewFn);
502 if (Name == "avx512.mask.cmp.ps.128") // Added in 7.0
503 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_128,
504 NewFn);
505 if (Name == "avx512.mask.cmp.ps.256") // Added in 7.0
506 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_256,
507 NewFn);
508 if (Name == "avx512.mask.cmp.ps.512") // Added in 7.0
509 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_512,
510 NewFn);
511 if (Name == "avx512bf16.cvtne2ps2bf16.128") // Added in 9.0
512 return UpgradeX86BF16Intrinsic(
513 F, Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128, NewFn);
514 if (Name == "avx512bf16.cvtne2ps2bf16.256") // Added in 9.0
515 return UpgradeX86BF16Intrinsic(
516 F, Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256, NewFn);
517 if (Name == "avx512bf16.cvtne2ps2bf16.512") // Added in 9.0
518 return UpgradeX86BF16Intrinsic(
519 F, Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512, NewFn);
520 if (Name == "avx512bf16.mask.cvtneps2bf16.128") // Added in 9.0
521 return UpgradeX86BF16Intrinsic(
522 F, Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128, NewFn);
523 if (Name == "avx512bf16.cvtneps2bf16.256") // Added in 9.0
524 return UpgradeX86BF16Intrinsic(
525 F, Intrinsic::x86_avx512bf16_cvtneps2bf16_256, NewFn);
526 if (Name == "avx512bf16.cvtneps2bf16.512") // Added in 9.0
527 return UpgradeX86BF16Intrinsic(
528 F, Intrinsic::x86_avx512bf16_cvtneps2bf16_512, NewFn);
529 if (Name == "avx512bf16.dpbf16ps.128") // Added in 9.0
530 return UpgradeX86BF16DPIntrinsic(
531 F, Intrinsic::x86_avx512bf16_dpbf16ps_128, NewFn);
532 if (Name == "avx512bf16.dpbf16ps.256") // Added in 9.0
533 return UpgradeX86BF16DPIntrinsic(
534 F, Intrinsic::x86_avx512bf16_dpbf16ps_256, NewFn);
535 if (Name == "avx512bf16.dpbf16ps.512") // Added in 9.0
536 return UpgradeX86BF16DPIntrinsic(
537 F, Intrinsic::x86_avx512bf16_dpbf16ps_512, NewFn);
538
539 // frcz.ss/sd may need to have an argument dropped. Added in 3.2
540 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
541 rename(F);
542 NewFn = Intrinsic::getDeclaration(F->getParent(),
543 Intrinsic::x86_xop_vfrcz_ss);
544 return true;
545 }
546 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
547 rename(F);
548 NewFn = Intrinsic::getDeclaration(F->getParent(),
549 Intrinsic::x86_xop_vfrcz_sd);
550 return true;
551 }
552 // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
553 if (Name.startswith("xop.vpermil2")) { // Added in 3.9
554 auto Idx = F->getFunctionType()->getParamType(2);
555 if (Idx->isFPOrFPVectorTy()) {
556 rename(F);
557 unsigned IdxSize = Idx->getPrimitiveSizeInBits();
558 unsigned EltSize = Idx->getScalarSizeInBits();
559 Intrinsic::ID Permil2ID;
560 if (EltSize == 64 && IdxSize == 128)
561 Permil2ID = Intrinsic::x86_xop_vpermil2pd;
562 else if (EltSize == 32 && IdxSize == 128)
563 Permil2ID = Intrinsic::x86_xop_vpermil2ps;
564 else if (EltSize == 64 && IdxSize == 256)
565 Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
566 else
567 Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
568 NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
569 return true;
570 }
571 }
572
573 if (Name == "seh.recoverfp") {
574 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
575 return true;
576 }
577
578 return false;
579}
580
581static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
582 assert(F && "Illegal to upgrade a non-existent Function.")(static_cast <bool> (F && "Illegal to upgrade a non-existent Function."
) ? void (0) : __assert_fail ("F && \"Illegal to upgrade a non-existent Function.\""
, "llvm/lib/IR/AutoUpgrade.cpp", 582, __extension__ __PRETTY_FUNCTION__
))
;
583
584 // Quickly eliminate it, if it's not a candidate.
585 StringRef Name = F->getName();
586 if (Name.size() <= 7 || !Name.startswith("llvm."))
587 return false;
588 Name = Name.substr(5); // Strip off "llvm."
589
590 switch (Name[0]) {
591 default: break;
592 case 'a': {
593 if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
594 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
595 F->arg_begin()->getType());
596 return true;
597 }
598 if (Name.startswith("aarch64.neon.frintn")) {
599 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::roundeven,
600 F->arg_begin()->getType());
601 return true;
602 }
603 if (Name.startswith("aarch64.neon.rbit")) {
604 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
605 F->arg_begin()->getType());
606 return true;
607 }
608 if (Name == "aarch64.sve.bfdot.lane") {
609 NewFn = Intrinsic::getDeclaration(F->getParent(),
610 Intrinsic::aarch64_sve_bfdot_lane_v2);
611 return true;
612 }
613 if (Name == "aarch64.sve.bfmlalb.lane") {
614 NewFn = Intrinsic::getDeclaration(F->getParent(),
615 Intrinsic::aarch64_sve_bfmlalb_lane_v2);
616 return true;
617 }
618 if (Name == "aarch64.sve.bfmlalt.lane") {
619 NewFn = Intrinsic::getDeclaration(F->getParent(),
620 Intrinsic::aarch64_sve_bfmlalt_lane_v2);
621 return true;
622 }
623 static const Regex LdRegex("^aarch64\\.sve\\.ld[234](.nxv[a-z0-9]+|$)");
624 if (LdRegex.match(Name)) {
625 Type *ScalarTy =
626 dyn_cast<VectorType>(F->getReturnType())->getElementType();
627 ElementCount EC =
628 dyn_cast<VectorType>(F->arg_begin()->getType())->getElementCount();
629 Type *Ty = VectorType::get(ScalarTy, EC);
630 Intrinsic::ID ID =
631 StringSwitch<Intrinsic::ID>(Name)
632 .StartsWith("aarch64.sve.ld2", Intrinsic::aarch64_sve_ld2_sret)
633 .StartsWith("aarch64.sve.ld3", Intrinsic::aarch64_sve_ld3_sret)
634 .StartsWith("aarch64.sve.ld4", Intrinsic::aarch64_sve_ld4_sret)
635 .Default(Intrinsic::not_intrinsic);
636 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Ty);
637 return true;
638 }
639 if (Name.startswith("aarch64.sve.tuple.get")) {
640 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
641 NewFn = Intrinsic::getDeclaration(F->getParent(),
642 Intrinsic::vector_extract, Tys);
643 return true;
644 }
645 if (Name.startswith("aarch64.sve.tuple.set")) {
646 auto Args = F->getFunctionType()->params();
647 Type *Tys[] = {Args[0], Args[2], Args[1]};
648 NewFn = Intrinsic::getDeclaration(F->getParent(),
649 Intrinsic::vector_insert, Tys);
650 return true;
651 }
652 static const Regex CreateTupleRegex(
653 "^aarch64\\.sve\\.tuple\\.create[234](.nxv[a-z0-9]+|$)");
654 if (CreateTupleRegex.match(Name)) {
655 auto Args = F->getFunctionType()->params();
656 Type *Tys[] = {F->getReturnType(), Args[1]};
657 NewFn = Intrinsic::getDeclaration(F->getParent(),
658 Intrinsic::vector_insert, Tys);
659 return true;
660 }
661 if (Name.startswith("arm.neon.vclz")) {
662 Type* args[2] = {
663 F->arg_begin()->getType(),
664 Type::getInt1Ty(F->getContext())
665 };
666 // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
667 // the end of the name. Change name from llvm.arm.neon.vclz.* to
668 // llvm.ctlz.*
669 FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
670 NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
671 "llvm.ctlz." + Name.substr(14), F->getParent());
672 return true;
673 }
674 if (Name.startswith("arm.neon.vcnt")) {
675 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
676 F->arg_begin()->getType());
677 return true;
678 }
679 static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
680 if (vstRegex.match(Name)) {
681 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
682 Intrinsic::arm_neon_vst2,
683 Intrinsic::arm_neon_vst3,
684 Intrinsic::arm_neon_vst4};
685
686 static const Intrinsic::ID StoreLaneInts[] = {
687 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
688 Intrinsic::arm_neon_vst4lane
689 };
690
691 auto fArgs = F->getFunctionType()->params();
692 Type *Tys[] = {fArgs[0], fArgs[1]};
693 if (!Name.contains("lane"))
694 NewFn = Intrinsic::getDeclaration(F->getParent(),
695 StoreInts[fArgs.size() - 3], Tys);
696 else
697 NewFn = Intrinsic::getDeclaration(F->getParent(),
698 StoreLaneInts[fArgs.size() - 5], Tys);
699 return true;
700 }
701 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
702 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
703 return true;
704 }
705 if (Name.startswith("arm.neon.vqadds.")) {
706 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
707 F->arg_begin()->getType());
708 return true;
709 }
710 if (Name.startswith("arm.neon.vqaddu.")) {
711 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
712 F->arg_begin()->getType());
713 return true;
714 }
715 if (Name.startswith("arm.neon.vqsubs.")) {
716 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
717 F->arg_begin()->getType());
718 return true;
719 }
720 if (Name.startswith("arm.neon.vqsubu.")) {
721 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
722 F->arg_begin()->getType());
723 return true;
724 }
725 if (Name.startswith("aarch64.neon.addp")) {
726 if (F->arg_size() != 2)
727 break; // Invalid IR.
728 VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
729 if (Ty && Ty->getElementType()->isFloatingPointTy()) {
730 NewFn = Intrinsic::getDeclaration(F->getParent(),
731 Intrinsic::aarch64_neon_faddp, Ty);
732 return true;
733 }
734 }
735
736 // Changed in 12.0: bfdot accept v4bf16 and v8bf16 instead of v8i8 and v16i8
737 // respectively
738 if ((Name.startswith("arm.neon.bfdot.") ||
739 Name.startswith("aarch64.neon.bfdot.")) &&
740 Name.endswith("i8")) {
741 Intrinsic::ID IID =
742 StringSwitch<Intrinsic::ID>(Name)
743 .Cases("arm.neon.bfdot.v2f32.v8i8",
744 "arm.neon.bfdot.v4f32.v16i8",
745 Intrinsic::arm_neon_bfdot)
746 .Cases("aarch64.neon.bfdot.v2f32.v8i8",
747 "aarch64.neon.bfdot.v4f32.v16i8",
748 Intrinsic::aarch64_neon_bfdot)
749 .Default(Intrinsic::not_intrinsic);
750 if (IID == Intrinsic::not_intrinsic)
751 break;
752
753 size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
754 assert((OperandWidth == 64 || OperandWidth == 128) &&(static_cast <bool> ((OperandWidth == 64 || OperandWidth
== 128) && "Unexpected operand width") ? void (0) : __assert_fail
("(OperandWidth == 64 || OperandWidth == 128) && \"Unexpected operand width\""
, "llvm/lib/IR/AutoUpgrade.cpp", 755, __extension__ __PRETTY_FUNCTION__
))
755 "Unexpected operand width")(static_cast <bool> ((OperandWidth == 64 || OperandWidth
== 128) && "Unexpected operand width") ? void (0) : __assert_fail
("(OperandWidth == 64 || OperandWidth == 128) && \"Unexpected operand width\""
, "llvm/lib/IR/AutoUpgrade.cpp", 755, __extension__ __PRETTY_FUNCTION__
))
;
756 LLVMContext &Ctx = F->getParent()->getContext();
757 std::array<Type *, 2> Tys {{
758 F->getReturnType(),
759 FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)
760 }};
761 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
762 return true;
763 }
764
765 // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic anymore
766 // and accept v8bf16 instead of v16i8
767 if ((Name.startswith("arm.neon.bfm") ||
768 Name.startswith("aarch64.neon.bfm")) &&
769 Name.endswith(".v4f32.v16i8")) {
770 Intrinsic::ID IID =
771 StringSwitch<Intrinsic::ID>(Name)
772 .Case("arm.neon.bfmmla.v4f32.v16i8",
773 Intrinsic::arm_neon_bfmmla)
774 .Case("arm.neon.bfmlalb.v4f32.v16i8",
775 Intrinsic::arm_neon_bfmlalb)
776 .Case("arm.neon.bfmlalt.v4f32.v16i8",
777 Intrinsic::arm_neon_bfmlalt)
778 .Case("aarch64.neon.bfmmla.v4f32.v16i8",
779 Intrinsic::aarch64_neon_bfmmla)
780 .Case("aarch64.neon.bfmlalb.v4f32.v16i8",
781 Intrinsic::aarch64_neon_bfmlalb)
782 .Case("aarch64.neon.bfmlalt.v4f32.v16i8",
783 Intrinsic::aarch64_neon_bfmlalt)
784 .Default(Intrinsic::not_intrinsic);
785 if (IID == Intrinsic::not_intrinsic)
786 break;
787
788 std::array<Type *, 0> Tys;
789 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
790 return true;
791 }
792
793 if (Name == "arm.mve.vctp64" &&
794 cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
795 // A vctp64 returning a v4i1 is converted to return a v2i1. Rename the
796 // function and deal with it below in UpgradeIntrinsicCall.
797 rename(F);
798 return true;
799 }
800 // These too are changed to accept a v2i1 insteead of the old v4i1.
801 if (Name == "arm.mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
802 Name == "arm.mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
803 Name == "arm.mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
804 Name == "arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
805 Name ==
806 "arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
807 Name == "arm.mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v4i1" ||
808 Name == "arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
809 Name == "arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
810 Name ==
811 "arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
812 Name == "arm.mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v4i1" ||
813 Name == "arm.cde.vcx1q.predicated.v2i64.v4i1" ||
814 Name == "arm.cde.vcx1qa.predicated.v2i64.v4i1" ||
815 Name == "arm.cde.vcx2q.predicated.v2i64.v4i1" ||
816 Name == "arm.cde.vcx2qa.predicated.v2i64.v4i1" ||
817 Name == "arm.cde.vcx3q.predicated.v2i64.v4i1" ||
818 Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1")
819 return true;
820
821 if (Name == "amdgcn.alignbit") {
822 // Target specific intrinsic became redundant
823 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
824 {F->getReturnType()});
825 return true;
826 }
827
828 break;
829 }
830
831 case 'c': {
832 if (Name.startswith("ctlz.") && F->arg_size() == 1) {
833 rename(F);
834 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
835 F->arg_begin()->getType());
836 return true;
837 }
838 if (Name.startswith("cttz.") && F->arg_size() == 1) {
839 rename(F);
840 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
841 F->arg_begin()->getType());
842 return true;
843 }
844 break;
845 }
846 case 'd': {
847 if (Name == "dbg.value" && F->arg_size() == 4) {
848 rename(F);
849 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
850 return true;
851 }
852 break;
853 }
854 case 'e': {
855 if (Name.startswith("experimental.vector.extract.")) {
856 rename(F);
857 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
858 NewFn = Intrinsic::getDeclaration(F->getParent(),
859 Intrinsic::vector_extract, Tys);
860 return true;
861 }
862
863 if (Name.startswith("experimental.vector.insert.")) {
864 rename(F);
865 auto Args = F->getFunctionType()->params();
866 Type *Tys[] = {Args[0], Args[1]};
867 NewFn = Intrinsic::getDeclaration(F->getParent(),
868 Intrinsic::vector_insert, Tys);
869 return true;
870 }
871
872 SmallVector<StringRef, 2> Groups;
873 static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+");
874 if (R.match(Name, &Groups)) {
875 Intrinsic::ID ID;
876 ID = StringSwitch<Intrinsic::ID>(Groups[1])
877 .Case("add", Intrinsic::vector_reduce_add)
878 .Case("mul", Intrinsic::vector_reduce_mul)
879 .Case("and", Intrinsic::vector_reduce_and)
880 .Case("or", Intrinsic::vector_reduce_or)
881 .Case("xor", Intrinsic::vector_reduce_xor)
882 .Case("smax", Intrinsic::vector_reduce_smax)
883 .Case("smin", Intrinsic::vector_reduce_smin)
884 .Case("umax", Intrinsic::vector_reduce_umax)
885 .Case("umin", Intrinsic::vector_reduce_umin)
886 .Case("fmax", Intrinsic::vector_reduce_fmax)
887 .Case("fmin", Intrinsic::vector_reduce_fmin)
888 .Default(Intrinsic::not_intrinsic);
889 if (ID != Intrinsic::not_intrinsic) {
890 rename(F);
891 auto Args = F->getFunctionType()->params();
892 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, {Args[0]});
893 return true;
894 }
895 }
896 static const Regex R2(
897 "^experimental.vector.reduce.v2.([a-z]+)\\.[fi][0-9]+");
898 Groups.clear();
899 if (R2.match(Name, &Groups)) {
900 Intrinsic::ID ID = Intrinsic::not_intrinsic;
901 if (Groups[1] == "fadd")
902 ID = Intrinsic::vector_reduce_fadd;
903 if (Groups[1] == "fmul")
904 ID = Intrinsic::vector_reduce_fmul;
905 if (ID != Intrinsic::not_intrinsic) {
906 rename(F);
907 auto Args = F->getFunctionType()->params();
908 Type *Tys[] = {Args[1]};
909 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
910 return true;
911 }
912 }
913 break;
914 }
915 case 'f':
916 if (Name.startswith("flt.rounds")) {
917 rename(F);
918 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::get_rounding);
919 return true;
920 }
921 break;
922 case 'i':
923 case 'l': {
924 bool IsLifetimeStart = Name.startswith("lifetime.start");
925 if (IsLifetimeStart || Name.startswith("invariant.start")) {
926 Intrinsic::ID ID = IsLifetimeStart ?
927 Intrinsic::lifetime_start : Intrinsic::invariant_start;
928 auto Args = F->getFunctionType()->params();
929 Type* ObjectPtr[1] = {Args[1]};
930 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
931 rename(F);
932 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
933 return true;
934 }
935 }
936
937 bool IsLifetimeEnd = Name.startswith("lifetime.end");
938 if (IsLifetimeEnd || Name.startswith("invariant.end")) {
939 Intrinsic::ID ID = IsLifetimeEnd ?
940 Intrinsic::lifetime_end : Intrinsic::invariant_end;
941
942 auto Args = F->getFunctionType()->params();
943 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
944 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
945 rename(F);
946 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
947 return true;
948 }
949 }
950 if (Name.startswith("invariant.group.barrier")) {
951 // Rename invariant.group.barrier to launder.invariant.group
952 auto Args = F->getFunctionType()->params();
953 Type* ObjectPtr[1] = {Args[0]};
954 rename(F);
955 NewFn = Intrinsic::getDeclaration(F->getParent(),
956 Intrinsic::launder_invariant_group, ObjectPtr);
957 return true;
958
959 }
960
961 break;
962 }
963 case 'm': {
964 if (Name.startswith("masked.load.")) {
965 Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
966 if (F->getName() !=
967 Intrinsic::getName(Intrinsic::masked_load, Tys, F->getParent())) {
968 rename(F);
969 NewFn = Intrinsic::getDeclaration(F->getParent(),
970 Intrinsic::masked_load,
971 Tys);
972 return true;
973 }
974 }
975 if (Name.startswith("masked.store.")) {
976 auto Args = F->getFunctionType()->params();
977 Type *Tys[] = { Args[0], Args[1] };
978 if (F->getName() !=
979 Intrinsic::getName(Intrinsic::masked_store, Tys, F->getParent())) {
980 rename(F);
981 NewFn = Intrinsic::getDeclaration(F->getParent(),
982 Intrinsic::masked_store,
983 Tys);
984 return true;
985 }
986 }
987 // Renaming gather/scatter intrinsics with no address space overloading
988 // to the new overload which includes an address space
989 if (Name.startswith("masked.gather.")) {
990 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
991 if (F->getName() !=
992 Intrinsic::getName(Intrinsic::masked_gather, Tys, F->getParent())) {
993 rename(F);
994 NewFn = Intrinsic::getDeclaration(F->getParent(),
995 Intrinsic::masked_gather, Tys);
996 return true;
997 }
998 }
999 if (Name.startswith("masked.scatter.")) {
1000 auto Args = F->getFunctionType()->params();
1001 Type *Tys[] = {Args[0], Args[1]};
1002 if (F->getName() !=
1003 Intrinsic::getName(Intrinsic::masked_scatter, Tys, F->getParent())) {
1004 rename(F);
1005 NewFn = Intrinsic::getDeclaration(F->getParent(),
1006 Intrinsic::masked_scatter, Tys);
1007 return true;
1008 }
1009 }
1010 // Updating the memory intrinsics (memcpy/memmove/memset) that have an
1011 // alignment parameter to embedding the alignment as an attribute of
1012 // the pointer args.
1013 if (Name.startswith("memcpy.") && F->arg_size() == 5) {
1014 rename(F);
1015 // Get the types of dest, src, and len
1016 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
1017 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
1018 ParamTypes);
1019 return true;
1020 }
1021 if (Name.startswith("memmove.") && F->arg_size() == 5) {
1022 rename(F);
1023 // Get the types of dest, src, and len
1024 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
1025 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
1026 ParamTypes);
1027 return true;
1028 }
1029 if (Name.startswith("memset.") && F->arg_size() == 5) {
1030 rename(F);
1031 // Get the types of dest, and len
1032 const auto *FT = F->getFunctionType();
1033 Type *ParamTypes[2] = {
1034 FT->getParamType(0), // Dest
1035 FT->getParamType(2) // len
1036 };
1037 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
1038 ParamTypes);
1039 return true;
1040 }
1041 break;
1042 }
1043 case 'n': {
1044 if (Name.startswith("nvvm.")) {
1045 Name = Name.substr(5);
1046
1047 // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
1048 Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
1049 .Cases("brev32", "brev64", Intrinsic::bitreverse)
1050 .Case("clz.i", Intrinsic::ctlz)
1051 .Case("popc.i", Intrinsic::ctpop)
1052 .Default(Intrinsic::not_intrinsic);
1053 if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
1054 NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
1055 {F->getReturnType()});
1056 return true;
1057 }
1058
1059 // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
1060 // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
1061 //
1062 // TODO: We could add lohi.i2d.
1063 bool Expand = StringSwitch<bool>(Name)
1064 .Cases("abs.i", "abs.ll", true)
1065 .Cases("clz.ll", "popc.ll", "h2f", true)
1066 .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
1067 .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
1068 .StartsWith("atomic.load.add.f32.p", true)
1069 .StartsWith("atomic.load.add.f64.p", true)
1070 .Default(false);
1071 if (Expand) {
1072 NewFn = nullptr;
1073 return true;
1074 }
1075 }
1076 break;
1077 }
1078 case 'o':
1079 // We only need to change the name to match the mangling including the
1080 // address space.
1081 if (Name.startswith("objectsize.")) {
1082 Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
1083 if (F->arg_size() == 2 || F->arg_size() == 3 ||
1084 F->getName() !=
1085 Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
1086 rename(F);
1087 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
1088 Tys);
1089 return true;
1090 }
1091 }
1092 break;
1093
1094 case 'p':
1095 if (Name == "prefetch") {
1096 // Handle address space overloading.
1097 Type *Tys[] = {F->arg_begin()->getType()};
1098 if (F->getName() !=
1099 Intrinsic::getName(Intrinsic::prefetch, Tys, F->getParent())) {
1100 rename(F);
1101 NewFn =
1102 Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
1103 return true;
1104 }
1105 } else if (Name.startswith("ptr.annotation.") && F->arg_size() == 4) {
1106 rename(F);
1107 NewFn = Intrinsic::getDeclaration(
1108 F->getParent(), Intrinsic::ptr_annotation,
1109 {F->arg_begin()->getType(), F->getArg(1)->getType()});
1110 return true;
1111 }
1112 break;
1113
1114 case 's':
1115 if (Name == "stackprotectorcheck") {
1116 NewFn = nullptr;
1117 return true;
1118 }
1119 break;
1120
1121 case 'v': {
1122 if (Name == "var.annotation" && F->arg_size() == 4) {
1123 rename(F);
1124 NewFn = Intrinsic::getDeclaration(
1125 F->getParent(), Intrinsic::var_annotation,
1126 {{F->arg_begin()->getType(), F->getArg(1)->getType()}});
1127 return true;
1128 }
1129 break;
1130 }
1131
1132 case 'x':
1133 if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
1134 return true;
1135 }
1136
1137 auto *ST = dyn_cast<StructType>(F->getReturnType());
1138 if (ST && (!ST->isLiteral() || ST->isPacked())) {
1139 // Replace return type with literal non-packed struct. Only do this for
1140 // intrinsics declared to return a struct, not for intrinsics with
1141 // overloaded return type, in which case the exact struct type will be
1142 // mangled into the name.
1143 SmallVector<Intrinsic::IITDescriptor> Desc;
1144 Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
1145 if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
1146 auto *FT = F->getFunctionType();
1147 auto *NewST = StructType::get(ST->getContext(), ST->elements());
1148 auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
1149 std::string Name = F->getName().str();
1150 rename(F);
1151 NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
1152 Name, F->getParent());
1153
1154 // The new function may also need remangling.
1155 if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
1156 NewFn = *Result;
1157 return true;
1158 }
1159 }
1160
1161 // Remangle our intrinsic since we upgrade the mangling
1162 auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
1163 if (Result != std::nullopt) {
1164 NewFn = *Result;
1165 return true;
1166 }
1167
1168 // This may not belong here. This function is effectively being overloaded
1169 // to both detect an intrinsic which needs upgrading, and to provide the
1170 // upgraded form of the intrinsic. We should perhaps have two separate
1171 // functions for this.
1172 return false;
1173}
1174
1175bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
1176 NewFn = nullptr;
1177 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
1178 assert(F != NewFn && "Intrinsic function upgraded to the same function")(static_cast <bool> (F != NewFn && "Intrinsic function upgraded to the same function"
) ? void (0) : __assert_fail ("F != NewFn && \"Intrinsic function upgraded to the same function\""
, "llvm/lib/IR/AutoUpgrade.cpp", 1178, __extension__ __PRETTY_FUNCTION__
))
;
1179
1180 // Upgrade intrinsic attributes. This does not change the function.
1181 if (NewFn)
1182 F = NewFn;
1183 if (Intrinsic::ID id = F->getIntrinsicID())
1184 F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
1185 return Upgraded;
1186}
1187
1188GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
1189 if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
1190 GV->getName() == "llvm.global_dtors")) ||
1191 !GV->hasInitializer())
1192 return nullptr;
1193 ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
1194 if (!ATy)
1195 return nullptr;
1196 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
1197 if (!STy || STy->getNumElements() != 2)
1198 return nullptr;
1199
1200 LLVMContext &C = GV->getContext();
1201 IRBuilder<> IRB(C);
1202 auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
1203 IRB.getInt8PtrTy());
1204 Constant *Init = GV->getInitializer();
1205 unsigned N = Init->getNumOperands();
1206 std::vector<Constant *> NewCtors(N);
1207 for (unsigned i = 0; i != N; ++i) {
1208 auto Ctor = cast<Constant>(Init->getOperand(i));
1209 NewCtors[i] = ConstantStruct::get(
1210 EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
1211 Constant::getNullValue(IRB.getInt8PtrTy()));
1212 }
1213 Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
1214
1215 return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
1216 NewInit, GV->getName());
1217}
1218
1219// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
1220// to byte shuffles.
1221static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
1222 Value *Op, unsigned Shift) {
1223 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1224 unsigned NumElts = ResultTy->getNumElements() * 8;
1225
1226 // Bitcast from a 64-bit element type to a byte element type.
1227 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1228 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1229
1230 // We'll be shuffling in zeroes.
1231 Value *Res = Constant::getNullValue(VecTy);
1232
1233 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1234 // we'll just return the zero vector.
1235 if (Shift < 16) {
1236 int Idxs[64];
1237 // 256/512-bit version is split into 2/4 16-byte lanes.
1238 for (unsigned l = 0; l != NumElts; l += 16)
1239 for (unsigned i = 0; i != 16; ++i) {
1240 unsigned Idx = NumElts + i - Shift;
1241 if (Idx < NumElts)
1242 Idx -= NumElts - 16; // end of lane, switch operand.
1243 Idxs[l + i] = Idx + l;
1244 }
1245
1246 Res = Builder.CreateShuffleVector(Res, Op, ArrayRef(Idxs, NumElts));
1247 }
1248
1249 // Bitcast back to a 64-bit element type.
1250 return Builder.CreateBitCast(Res, ResultTy, "cast");
1251}
1252
1253// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
1254// to byte shuffles.
1255static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
1256 unsigned Shift) {
1257 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1258 unsigned NumElts = ResultTy->getNumElements() * 8;
1259
1260 // Bitcast from a 64-bit element type to a byte element type.
1261 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1262 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1263
1264 // We'll be shuffling in zeroes.
1265 Value *Res = Constant::getNullValue(VecTy);
1266
1267 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1268 // we'll just return the zero vector.
1269 if (Shift < 16) {
1270 int Idxs[64];
1271 // 256/512-bit version is split into 2/4 16-byte lanes.
1272 for (unsigned l = 0; l != NumElts; l += 16)
1273 for (unsigned i = 0; i != 16; ++i) {
1274 unsigned Idx = i + Shift;
1275 if (Idx >= 16)
1276 Idx += NumElts - 16; // end of lane, switch operand.
1277 Idxs[l + i] = Idx + l;
1278 }
1279
1280 Res = Builder.CreateShuffleVector(Op, Res, ArrayRef(Idxs, NumElts));
1281 }
1282
1283 // Bitcast back to a 64-bit element type.
1284 return Builder.CreateBitCast(Res, ResultTy, "cast");
1285}
1286
1287static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
1288 unsigned NumElts) {
1289 assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements")(static_cast <bool> (isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements"
) ? void (0) : __assert_fail ("isPowerOf2_32(NumElts) && \"Expected power-of-2 mask elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 1289, __extension__ __PRETTY_FUNCTION__
))
;
1290 llvm::VectorType *MaskTy = FixedVectorType::get(
1291 Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
1292 Mask = Builder.CreateBitCast(Mask, MaskTy);
1293
1294 // If we have less than 8 elements (1, 2 or 4), then the starting mask was an
1295 // i8 and we need to extract down to the right number of elements.
1296 if (NumElts <= 4) {
1297 int Indices[4];
1298 for (unsigned i = 0; i != NumElts; ++i)
1299 Indices[i] = i;
1300 Mask = Builder.CreateShuffleVector(Mask, Mask, ArrayRef(Indices, NumElts),
1301 "extract");
1302 }
1303
1304 return Mask;
1305}
1306
1307static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
1308 Value *Op0, Value *Op1) {
1309 // If the mask is all ones just emit the first operation.
1310 if (const auto *C = dyn_cast<Constant>(Mask))
1311 if (C->isAllOnesValue())
1312 return Op0;
1313
1314 Mask = getX86MaskVec(Builder, Mask,
1315 cast<FixedVectorType>(Op0->getType())->getNumElements());
1316 return Builder.CreateSelect(Mask, Op0, Op1);
1317}
1318
1319static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
1320 Value *Op0, Value *Op1) {
1321 // If the mask is all ones just emit the first operation.
1322 if (const auto *C = dyn_cast<Constant>(Mask))
1323 if (C->isAllOnesValue())
1324 return Op0;
1325
1326 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
1327 Mask->getType()->getIntegerBitWidth());
1328 Mask = Builder.CreateBitCast(Mask, MaskTy);
1329 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
1330 return Builder.CreateSelect(Mask, Op0, Op1);
1331}
1332
1333// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
1334// PALIGNR handles large immediates by shifting while VALIGN masks the immediate
1335// so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
1336static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
1337 Value *Op1, Value *Shift,
1338 Value *Passthru, Value *Mask,
1339 bool IsVALIGN) {
1340 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
1341
1342 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1343 assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!")(static_cast <bool> ((IsVALIGN || NumElts % 16 == 0) &&
"Illegal NumElts for PALIGNR!") ? void (0) : __assert_fail (
"(IsVALIGN || NumElts % 16 == 0) && \"Illegal NumElts for PALIGNR!\""
, "llvm/lib/IR/AutoUpgrade.cpp", 1343, __extension__ __PRETTY_FUNCTION__
))
;
1344 assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!")(static_cast <bool> ((!IsVALIGN || NumElts <= 16) &&
"NumElts too large for VALIGN!") ? void (0) : __assert_fail (
"(!IsVALIGN || NumElts <= 16) && \"NumElts too large for VALIGN!\""
, "llvm/lib/IR/AutoUpgrade.cpp", 1344, __extension__ __PRETTY_FUNCTION__
))
;
1345 assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!")(static_cast <bool> (isPowerOf2_32(NumElts) && "NumElts not a power of 2!"
) ? void (0) : __assert_fail ("isPowerOf2_32(NumElts) && \"NumElts not a power of 2!\""
, "llvm/lib/IR/AutoUpgrade.cpp", 1345, __extension__ __PRETTY_FUNCTION__
))
;
1346
1347 // Mask the immediate for VALIGN.
1348 if (IsVALIGN)
1349 ShiftVal &= (NumElts - 1);
1350
1351 // If palignr is shifting the pair of vectors more than the size of two
1352 // lanes, emit zero.
1353 if (ShiftVal >= 32)
1354 return llvm::Constant::getNullValue(Op0->getType());
1355
1356 // If palignr is shifting the pair of input vectors more than one lane,
1357 // but less than two lanes, convert to shifting in zeroes.
1358 if (ShiftVal > 16) {
1359 ShiftVal -= 16;
1360 Op1 = Op0;
1361 Op0 = llvm::Constant::getNullValue(Op0->getType());
1362 }
1363
1364 int Indices[64];
1365 // 256-bit palignr operates on 128-bit lanes so we need to handle that
1366 for (unsigned l = 0; l < NumElts; l += 16) {
1367 for (unsigned i = 0; i != 16; ++i) {
1368 unsigned Idx = ShiftVal + i;
1369 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1370 Idx += NumElts - 16; // End of lane, switch operand.
1371 Indices[l + i] = Idx + l;
1372 }
1373 }
1374
1375 Value *Align = Builder.CreateShuffleVector(
1376 Op1, Op0, ArrayRef(Indices, NumElts), "palignr");
1377
1378 return EmitX86Select(Builder, Mask, Align, Passthru);
1379}
1380
1381static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
1382 bool ZeroMask, bool IndexForm) {
1383 Type *Ty = CI.getType();
1384 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1385 unsigned EltWidth = Ty->getScalarSizeInBits();
1386 bool IsFloat = Ty->isFPOrFPVectorTy();
1387 Intrinsic::ID IID;
1388 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1389 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1390 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1391 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1392 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1393 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1394 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1395 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1396 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1397 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1398 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1399 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1400 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1401 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1402 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1403 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1404 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1405 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1406 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1407 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1408 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1409 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1410 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1411 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1412 else if (VecWidth == 128 && EltWidth == 16)
1413 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1414 else if (VecWidth == 256 && EltWidth == 16)
1415 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1416 else if (VecWidth == 512 && EltWidth == 16)
1417 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1418 else if (VecWidth == 128 && EltWidth == 8)
1419 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1420 else if (VecWidth == 256 && EltWidth == 8)
1421 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1422 else if (VecWidth == 512 && EltWidth == 8)
1423 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1424 else
1425 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1425)
;
1426
1427 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1428 CI.getArgOperand(2) };
1429
1430 // If this isn't index form we need to swap operand 0 and 1.
1431 if (!IndexForm)
1432 std::swap(Args[0], Args[1]);
1433
1434 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1435 Args);
1436 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1437 : Builder.CreateBitCast(CI.getArgOperand(1),
1438 Ty);
1439 return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1440}
1441
1442static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
1443 Intrinsic::ID IID) {
1444 Type *Ty = CI.getType();
1445 Value *Op0 = CI.getOperand(0);
1446 Value *Op1 = CI.getOperand(1);
1447 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1448 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1449
1450 if (CI.arg_size() == 4) { // For masked intrinsics.
1451 Value *VecSrc = CI.getOperand(2);
1452 Value *Mask = CI.getOperand(3);
1453 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1454 }
1455 return Res;
1456}
1457
1458static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
1459 bool IsRotateRight) {
1460 Type *Ty = CI.getType();
1461 Value *Src = CI.getArgOperand(0);
1462 Value *Amt = CI.getArgOperand(1);
1463
1464 // Amount may be scalar immediate, in which case create a splat vector.
1465 // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
1466 // we only care about the lowest log2 bits anyway.
1467 if (Amt->getType() != Ty) {
1468 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1469 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1470 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1471 }
1472
1473 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1474 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1475 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1476
1477 if (CI.arg_size() == 4) { // For masked intrinsics.
1478 Value *VecSrc = CI.getOperand(2);
1479 Value *Mask = CI.getOperand(3);
1480 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1481 }
1482 return Res;
1483}
1484
1485static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
1486 bool IsSigned) {
1487 Type *Ty = CI.getType();
1488 Value *LHS = CI.getArgOperand(0);
1489 Value *RHS = CI.getArgOperand(1);
1490
1491 CmpInst::Predicate Pred;
1492 switch (Imm) {
1493 case 0x0:
1494 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1495 break;
1496 case 0x1:
1497 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1498 break;
1499 case 0x2:
1500 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1501 break;
1502 case 0x3:
1503 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1504 break;
1505 case 0x4:
1506 Pred = ICmpInst::ICMP_EQ;
1507 break;
1508 case 0x5:
1509 Pred = ICmpInst::ICMP_NE;
1510 break;
1511 case 0x6:
1512 return Constant::getNullValue(Ty); // FALSE
1513 case 0x7:
1514 return Constant::getAllOnesValue(Ty); // TRUE
1515 default:
1516 llvm_unreachable("Unknown XOP vpcom/vpcomu predicate")::llvm::llvm_unreachable_internal("Unknown XOP vpcom/vpcomu predicate"
, "llvm/lib/IR/AutoUpgrade.cpp", 1516)
;
1517 }
1518
1519 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1520 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1521 return Ext;
1522}
1523
1524static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
1525 bool IsShiftRight, bool ZeroMask) {
1526 Type *Ty = CI.getType();
1527 Value *Op0 = CI.getArgOperand(0);
1528 Value *Op1 = CI.getArgOperand(1);
1529 Value *Amt = CI.getArgOperand(2);
1530
1531 if (IsShiftRight)
1532 std::swap(Op0, Op1);
1533
1534 // Amount may be scalar immediate, in which case create a splat vector.
1535 // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
1536 // we only care about the lowest log2 bits anyway.
1537 if (Amt->getType() != Ty) {
1538 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1539 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1540 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1541 }
1542
1543 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1544 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1545 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1546
1547 unsigned NumArgs = CI.arg_size();
1548 if (NumArgs >= 4) { // For masked intrinsics.
1549 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1550 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1551 CI.getArgOperand(0);
1552 Value *Mask = CI.getOperand(NumArgs - 1);
1553 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1554 }
1555 return Res;
1556}
1557
1558static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
1559 Value *Ptr, Value *Data, Value *Mask,
1560 bool Aligned) {
1561 // Cast the pointer to the right type.
1562 Ptr = Builder.CreateBitCast(Ptr,
1563 llvm::PointerType::getUnqual(Data->getType()));
1564 const Align Alignment =
1565 Aligned
1566 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedValue() / 8)
1567 : Align(1);
1568
1569 // If the mask is all ones just emit a regular store.
1570 if (const auto *C = dyn_cast<Constant>(Mask))
1571 if (C->isAllOnesValue())
1572 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1573
1574 // Convert the mask from an integer type to a vector of i1.
1575 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1576 Mask = getX86MaskVec(Builder, Mask, NumElts);
1577 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1578}
1579
1580static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
1581 Value *Ptr, Value *Passthru, Value *Mask,
1582 bool Aligned) {
1583 Type *ValTy = Passthru->getType();
1584 // Cast the pointer to the right type.
1585 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1586 const Align Alignment =
1587 Aligned
1588 ? Align(
1589 Passthru->getType()->getPrimitiveSizeInBits().getFixedValue() /
1590 8)
1591 : Align(1);
1592
1593 // If the mask is all ones just emit a regular store.
1594 if (const auto *C = dyn_cast<Constant>(Mask))
1595 if (C->isAllOnesValue())
1596 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1597
1598 // Convert the mask from an integer type to a vector of i1.
1599 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1600 Mask = getX86MaskVec(Builder, Mask, NumElts);
1601 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1602}
1603
1604static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
1605 Type *Ty = CI.getType();
1606 Value *Op0 = CI.getArgOperand(0);
1607 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1608 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1609 if (CI.arg_size() == 3)
1610 Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1611 return Res;
1612}
1613
1614static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
1615 Type *Ty = CI.getType();
1616
1617 // Arguments have a vXi32 type so cast to vXi64.
1618 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1619 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1620
1621 if (IsSigned) {
1622 // Shift left then arithmetic shift right.
1623 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1624 LHS = Builder.CreateShl(LHS, ShiftAmt);
1625 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1626 RHS = Builder.CreateShl(RHS, ShiftAmt);
1627 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1628 } else {
1629 // Clear the upper bits.
1630 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1631 LHS = Builder.CreateAnd(LHS, Mask);
1632 RHS = Builder.CreateAnd(RHS, Mask);
1633 }
1634
1635 Value *Res = Builder.CreateMul(LHS, RHS);
1636
1637 if (CI.arg_size() == 4)
1638 Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1639
1640 return Res;
1641}
1642
1643// Applying mask on vector of i1's and make sure result is at least 8 bits wide.
1644static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1645 Value *Mask) {
1646 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1647 if (Mask) {
1648 const auto *C = dyn_cast<Constant>(Mask);
1649 if (!C || !C->isAllOnesValue())
1650 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1651 }
1652
1653 if (NumElts < 8) {
1654 int Indices[8];
1655 for (unsigned i = 0; i != NumElts; ++i)
1656 Indices[i] = i;
1657 for (unsigned i = NumElts; i != 8; ++i)
1658 Indices[i] = NumElts + i % NumElts;
1659 Vec = Builder.CreateShuffleVector(Vec,
1660 Constant::getNullValue(Vec->getType()),
1661 Indices);
1662 }
1663 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1664}
1665
1666static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
1667 unsigned CC, bool Signed) {
1668 Value *Op0 = CI.getArgOperand(0);
1669 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1670
1671 Value *Cmp;
1672 if (CC == 3) {
1673 Cmp = Constant::getNullValue(
1674 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1675 } else if (CC == 7) {
1676 Cmp = Constant::getAllOnesValue(
1677 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1678 } else {
1679 ICmpInst::Predicate Pred;
1680 switch (CC) {
1681 default: llvm_unreachable("Unknown condition code")::llvm::llvm_unreachable_internal("Unknown condition code", "llvm/lib/IR/AutoUpgrade.cpp"
, 1681)
;
1682 case 0: Pred = ICmpInst::ICMP_EQ; break;
1683 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1684 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1685 case 4: Pred = ICmpInst::ICMP_NE; break;
1686 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1687 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1688 }
1689 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1690 }
1691
1692 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1693
1694 return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
1695}
1696
1697// Replace a masked intrinsic with an older unmasked intrinsic.
1698static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
1699 Intrinsic::ID IID) {
1700 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1701 Value *Rep = Builder.CreateCall(Intrin,
1702 { CI.getArgOperand(0), CI.getArgOperand(1) });
1703 return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1704}
1705
1706static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
1707 Value* A = CI.getArgOperand(0);
1708 Value* B = CI.getArgOperand(1);
1709 Value* Src = CI.getArgOperand(2);
1710 Value* Mask = CI.getArgOperand(3);
1711
1712 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1713 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1714 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1715 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1716 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1717 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1718}
1719
1720
1721static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
1722 Value* Op = CI.getArgOperand(0);
1723 Type* ReturnOp = CI.getType();
1724 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1725 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1726 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1727}
1728
1729// Replace intrinsic with unmasked version and a select.
1730static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1731 CallBase &CI, Value *&Rep) {
1732 Name = Name.substr(12); // Remove avx512.mask.
1733
1734 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1735 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1736 Intrinsic::ID IID;
1737 if (Name.startswith("max.p")) {
1738 if (VecWidth == 128 && EltWidth == 32)
1739 IID = Intrinsic::x86_sse_max_ps;
1740 else if (VecWidth == 128 && EltWidth == 64)
1741 IID = Intrinsic::x86_sse2_max_pd;
1742 else if (VecWidth == 256 && EltWidth == 32)
1743 IID = Intrinsic::x86_avx_max_ps_256;
1744 else if (VecWidth == 256 && EltWidth == 64)
1745 IID = Intrinsic::x86_avx_max_pd_256;
1746 else
1747 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1747)
;
1748 } else if (Name.startswith("min.p")) {
1749 if (VecWidth == 128 && EltWidth == 32)
1750 IID = Intrinsic::x86_sse_min_ps;
1751 else if (VecWidth == 128 && EltWidth == 64)
1752 IID = Intrinsic::x86_sse2_min_pd;
1753 else if (VecWidth == 256 && EltWidth == 32)
1754 IID = Intrinsic::x86_avx_min_ps_256;
1755 else if (VecWidth == 256 && EltWidth == 64)
1756 IID = Intrinsic::x86_avx_min_pd_256;
1757 else
1758 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1758)
;
1759 } else if (Name.startswith("pshuf.b.")) {
1760 if (VecWidth == 128)
1761 IID = Intrinsic::x86_ssse3_pshuf_b_128;
1762 else if (VecWidth == 256)
1763 IID = Intrinsic::x86_avx2_pshuf_b;
1764 else if (VecWidth == 512)
1765 IID = Intrinsic::x86_avx512_pshuf_b_512;
1766 else
1767 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1767)
;
1768 } else if (Name.startswith("pmul.hr.sw.")) {
1769 if (VecWidth == 128)
1770 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
1771 else if (VecWidth == 256)
1772 IID = Intrinsic::x86_avx2_pmul_hr_sw;
1773 else if (VecWidth == 512)
1774 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
1775 else
1776 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1776)
;
1777 } else if (Name.startswith("pmulh.w.")) {
1778 if (VecWidth == 128)
1779 IID = Intrinsic::x86_sse2_pmulh_w;
1780 else if (VecWidth == 256)
1781 IID = Intrinsic::x86_avx2_pmulh_w;
1782 else if (VecWidth == 512)
1783 IID = Intrinsic::x86_avx512_pmulh_w_512;
1784 else
1785 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1785)
;
1786 } else if (Name.startswith("pmulhu.w.")) {
1787 if (VecWidth == 128)
1788 IID = Intrinsic::x86_sse2_pmulhu_w;
1789 else if (VecWidth == 256)
1790 IID = Intrinsic::x86_avx2_pmulhu_w;
1791 else if (VecWidth == 512)
1792 IID = Intrinsic::x86_avx512_pmulhu_w_512;
1793 else
1794 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1794)
;
1795 } else if (Name.startswith("pmaddw.d.")) {
1796 if (VecWidth == 128)
1797 IID = Intrinsic::x86_sse2_pmadd_wd;
1798 else if (VecWidth == 256)
1799 IID = Intrinsic::x86_avx2_pmadd_wd;
1800 else if (VecWidth == 512)
1801 IID = Intrinsic::x86_avx512_pmaddw_d_512;
1802 else
1803 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1803)
;
1804 } else if (Name.startswith("pmaddubs.w.")) {
1805 if (VecWidth == 128)
1806 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
1807 else if (VecWidth == 256)
1808 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
1809 else if (VecWidth == 512)
1810 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
1811 else
1812 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1812)
;
1813 } else if (Name.startswith("packsswb.")) {
1814 if (VecWidth == 128)
1815 IID = Intrinsic::x86_sse2_packsswb_128;
1816 else if (VecWidth == 256)
1817 IID = Intrinsic::x86_avx2_packsswb;
1818 else if (VecWidth == 512)
1819 IID = Intrinsic::x86_avx512_packsswb_512;
1820 else
1821 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1821)
;
1822 } else if (Name.startswith("packssdw.")) {
1823 if (VecWidth == 128)
1824 IID = Intrinsic::x86_sse2_packssdw_128;
1825 else if (VecWidth == 256)
1826 IID = Intrinsic::x86_avx2_packssdw;
1827 else if (VecWidth == 512)
1828 IID = Intrinsic::x86_avx512_packssdw_512;
1829 else
1830 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1830)
;
1831 } else if (Name.startswith("packuswb.")) {
1832 if (VecWidth == 128)
1833 IID = Intrinsic::x86_sse2_packuswb_128;
1834 else if (VecWidth == 256)
1835 IID = Intrinsic::x86_avx2_packuswb;
1836 else if (VecWidth == 512)
1837 IID = Intrinsic::x86_avx512_packuswb_512;
1838 else
1839 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1839)
;
1840 } else if (Name.startswith("packusdw.")) {
1841 if (VecWidth == 128)
1842 IID = Intrinsic::x86_sse41_packusdw;
1843 else if (VecWidth == 256)
1844 IID = Intrinsic::x86_avx2_packusdw;
1845 else if (VecWidth == 512)
1846 IID = Intrinsic::x86_avx512_packusdw_512;
1847 else
1848 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1848)
;
1849 } else if (Name.startswith("vpermilvar.")) {
1850 if (VecWidth == 128 && EltWidth == 32)
1851 IID = Intrinsic::x86_avx_vpermilvar_ps;
1852 else if (VecWidth == 128 && EltWidth == 64)
1853 IID = Intrinsic::x86_avx_vpermilvar_pd;
1854 else if (VecWidth == 256 && EltWidth == 32)
1855 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
1856 else if (VecWidth == 256 && EltWidth == 64)
1857 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
1858 else if (VecWidth == 512 && EltWidth == 32)
1859 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
1860 else if (VecWidth == 512 && EltWidth == 64)
1861 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
1862 else
1863 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1863)
;
1864 } else if (Name == "cvtpd2dq.256") {
1865 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
1866 } else if (Name == "cvtpd2ps.256") {
1867 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
1868 } else if (Name == "cvttpd2dq.256") {
1869 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
1870 } else if (Name == "cvttps2dq.128") {
1871 IID = Intrinsic::x86_sse2_cvttps2dq;
1872 } else if (Name == "cvttps2dq.256") {
1873 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
1874 } else if (Name.startswith("permvar.")) {
1875 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
1876 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1877 IID = Intrinsic::x86_avx2_permps;
1878 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1879 IID = Intrinsic::x86_avx2_permd;
1880 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1881 IID = Intrinsic::x86_avx512_permvar_df_256;
1882 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1883 IID = Intrinsic::x86_avx512_permvar_di_256;
1884 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1885 IID = Intrinsic::x86_avx512_permvar_sf_512;
1886 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1887 IID = Intrinsic::x86_avx512_permvar_si_512;
1888 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1889 IID = Intrinsic::x86_avx512_permvar_df_512;
1890 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1891 IID = Intrinsic::x86_avx512_permvar_di_512;
1892 else if (VecWidth == 128 && EltWidth == 16)
1893 IID = Intrinsic::x86_avx512_permvar_hi_128;
1894 else if (VecWidth == 256 && EltWidth == 16)
1895 IID = Intrinsic::x86_avx512_permvar_hi_256;
1896 else if (VecWidth == 512 && EltWidth == 16)
1897 IID = Intrinsic::x86_avx512_permvar_hi_512;
1898 else if (VecWidth == 128 && EltWidth == 8)
1899 IID = Intrinsic::x86_avx512_permvar_qi_128;
1900 else if (VecWidth == 256 && EltWidth == 8)
1901 IID = Intrinsic::x86_avx512_permvar_qi_256;
1902 else if (VecWidth == 512 && EltWidth == 8)
1903 IID = Intrinsic::x86_avx512_permvar_qi_512;
1904 else
1905 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1905)
;
1906 } else if (Name.startswith("dbpsadbw.")) {
1907 if (VecWidth == 128)
1908 IID = Intrinsic::x86_avx512_dbpsadbw_128;
1909 else if (VecWidth == 256)
1910 IID = Intrinsic::x86_avx512_dbpsadbw_256;
1911 else if (VecWidth == 512)
1912 IID = Intrinsic::x86_avx512_dbpsadbw_512;
1913 else
1914 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1914)
;
1915 } else if (Name.startswith("pmultishift.qb.")) {
1916 if (VecWidth == 128)
1917 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
1918 else if (VecWidth == 256)
1919 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
1920 else if (VecWidth == 512)
1921 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
1922 else
1923 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1923)
;
1924 } else if (Name.startswith("conflict.")) {
1925 if (Name[9] == 'd' && VecWidth == 128)
1926 IID = Intrinsic::x86_avx512_conflict_d_128;
1927 else if (Name[9] == 'd' && VecWidth == 256)
1928 IID = Intrinsic::x86_avx512_conflict_d_256;
1929 else if (Name[9] == 'd' && VecWidth == 512)
1930 IID = Intrinsic::x86_avx512_conflict_d_512;
1931 else if (Name[9] == 'q' && VecWidth == 128)
1932 IID = Intrinsic::x86_avx512_conflict_q_128;
1933 else if (Name[9] == 'q' && VecWidth == 256)
1934 IID = Intrinsic::x86_avx512_conflict_q_256;
1935 else if (Name[9] == 'q' && VecWidth == 512)
1936 IID = Intrinsic::x86_avx512_conflict_q_512;
1937 else
1938 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1938)
;
1939 } else if (Name.startswith("pavg.")) {
1940 if (Name[5] == 'b' && VecWidth == 128)
1941 IID = Intrinsic::x86_sse2_pavg_b;
1942 else if (Name[5] == 'b' && VecWidth == 256)
1943 IID = Intrinsic::x86_avx2_pavg_b;
1944 else if (Name[5] == 'b' && VecWidth == 512)
1945 IID = Intrinsic::x86_avx512_pavg_b_512;
1946 else if (Name[5] == 'w' && VecWidth == 128)
1947 IID = Intrinsic::x86_sse2_pavg_w;
1948 else if (Name[5] == 'w' && VecWidth == 256)
1949 IID = Intrinsic::x86_avx2_pavg_w;
1950 else if (Name[5] == 'w' && VecWidth == 512)
1951 IID = Intrinsic::x86_avx512_pavg_w_512;
1952 else
1953 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 1953)
;
1954 } else
1955 return false;
1956
1957 SmallVector<Value *, 4> Args(CI.args());
1958 Args.pop_back();
1959 Args.pop_back();
1960 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1961 Args);
1962 unsigned NumArgs = CI.arg_size();
1963 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
1964 CI.getArgOperand(NumArgs - 2));
1965 return true;
1966}
1967
1968/// Upgrade comment in call to inline asm that represents an objc retain release
1969/// marker.
1970void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
1971 size_t Pos;
1972 if (AsmStr->find("mov\tfp") == 0 &&
1973 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
1974 (Pos = AsmStr->find("# marker")) != std::string::npos) {
1975 AsmStr->replace(Pos, 1, ";");
1976 }
1977}
1978
1979static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
1980 IRBuilder<> &Builder) {
1981 if (Name == "mve.vctp64.old") {
1982 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
1983 // correct type.
1984 Value *VCTP = Builder.CreateCall(
1985 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
1986 CI->getArgOperand(0), CI->getName());
1987 Value *C1 = Builder.CreateCall(
1988 Intrinsic::getDeclaration(
1989 F->getParent(), Intrinsic::arm_mve_pred_v2i,
1990 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
1991 VCTP);
1992 return Builder.CreateCall(
1993 Intrinsic::getDeclaration(
1994 F->getParent(), Intrinsic::arm_mve_pred_i2v,
1995 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
1996 C1);
1997 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
1998 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
1999 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
2000 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
2001 Name ==
2002 "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
2003 Name == "mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v4i1" ||
2004 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
2005 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
2006 Name ==
2007 "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
2008 Name == "mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v4i1" ||
2009 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
2010 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
2011 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
2012 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
2013 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
2014 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
2015 std::vector<Type *> Tys;
2016 unsigned ID = CI->getIntrinsicID();
2017 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
2018 switch (ID) {
2019 case Intrinsic::arm_mve_mull_int_predicated:
2020 case Intrinsic::arm_mve_vqdmull_predicated:
2021 case Intrinsic::arm_mve_vldr_gather_base_predicated:
2022 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
2023 break;
2024 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
2025 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
2026 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
2027 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
2028 V2I1Ty};
2029 break;
2030 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
2031 Tys = {CI->getType(), CI->getOperand(0)->getType(),
2032 CI->getOperand(1)->getType(), V2I1Ty};
2033 break;
2034 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
2035 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
2036 CI->getOperand(2)->getType(), V2I1Ty};
2037 break;
2038 case Intrinsic::arm_cde_vcx1q_predicated:
2039 case Intrinsic::arm_cde_vcx1qa_predicated:
2040 case Intrinsic::arm_cde_vcx2q_predicated:
2041 case Intrinsic::arm_cde_vcx2qa_predicated:
2042 case Intrinsic::arm_cde_vcx3q_predicated:
2043 case Intrinsic::arm_cde_vcx3qa_predicated:
2044 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
2045 break;
2046 default:
2047 llvm_unreachable("Unhandled Intrinsic!")::llvm::llvm_unreachable_internal("Unhandled Intrinsic!", "llvm/lib/IR/AutoUpgrade.cpp"
, 2047)
;
2048 }
2049
2050 std::vector<Value *> Ops;
2051 for (Value *Op : CI->args()) {
2052 Type *Ty = Op->getType();
2053 if (Ty->getScalarSizeInBits() == 1) {
2054 Value *C1 = Builder.CreateCall(
2055 Intrinsic::getDeclaration(
2056 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2057 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2058 Op);
2059 Op = Builder.CreateCall(
2060 Intrinsic::getDeclaration(F->getParent(),
2061 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
2062 C1);
2063 }
2064 Ops.push_back(Op);
2065 }
2066
2067 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
2068 return Builder.CreateCall(Fn, Ops, CI->getName());
2069 }
2070 llvm_unreachable("Unknown function for ARM CallBase upgrade.")::llvm::llvm_unreachable_internal("Unknown function for ARM CallBase upgrade."
, "llvm/lib/IR/AutoUpgrade.cpp", 2070)
;
2071}
2072
2073/// Upgrade a call to an old intrinsic. All argument and return casting must be
2074/// provided to seamlessly integrate with existing context.
2075void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
2076 // Note dyn_cast to Function is not quite the same as getCalledFunction, which
2077 // checks the callee's function type matches. It's likely we need to handle
2078 // type changes here.
2079 Function *F = dyn_cast<Function>(CI->getCalledOperand());
1
Assuming the object is a 'CastReturnType'
2080 if (!F
1.1
'F' is non-null
)
2
Taking false branch
2081 return;
2082
2083 LLVMContext &C = CI->getContext();
2084 IRBuilder<> Builder(C);
2085 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
2086
2087 if (!NewFn) {
3
Assuming 'NewFn' is non-null
4
Taking false branch
2088 // Get the Function's name.
2089 StringRef Name = F->getName();
2090
2091 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'")(static_cast <bool> (Name.startswith("llvm.") &&
"Intrinsic doesn't start with 'llvm.'") ? void (0) : __assert_fail
("Name.startswith(\"llvm.\") && \"Intrinsic doesn't start with 'llvm.'\""
, "llvm/lib/IR/AutoUpgrade.cpp", 2091, __extension__ __PRETTY_FUNCTION__
))
;
2092 Name = Name.substr(5);
2093
2094 bool IsX86 = Name.startswith("x86.");
2095 if (IsX86)
2096 Name = Name.substr(4);
2097 bool IsNVVM = Name.startswith("nvvm.");
2098 if (IsNVVM)
2099 Name = Name.substr(5);
2100 bool IsARM = Name.startswith("arm.");
2101 if (IsARM)
2102 Name = Name.substr(4);
2103
2104 if (IsX86 && Name.startswith("sse4a.movnt.")) {
2105 Module *M = F->getParent();
2106 SmallVector<Metadata *, 1> Elts;
2107 Elts.push_back(
2108 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2109 MDNode *Node = MDNode::get(C, Elts);
2110
2111 Value *Arg0 = CI->getArgOperand(0);
2112 Value *Arg1 = CI->getArgOperand(1);
2113
2114 // Nontemporal (unaligned) store of the 0'th element of the float/double
2115 // vector.
2116 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
2117 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
2118 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
2119 Value *Extract =
2120 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
2121
2122 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
2123 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2124
2125 // Remove intrinsic.
2126 CI->eraseFromParent();
2127 return;
2128 }
2129
2130 if (IsX86 && (Name.startswith("avx.movnt.") ||
2131 Name.startswith("avx512.storent."))) {
2132 Module *M = F->getParent();
2133 SmallVector<Metadata *, 1> Elts;
2134 Elts.push_back(
2135 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2136 MDNode *Node = MDNode::get(C, Elts);
2137
2138 Value *Arg0 = CI->getArgOperand(0);
2139 Value *Arg1 = CI->getArgOperand(1);
2140
2141 // Convert the type of the pointer to a pointer to the stored type.
2142 Value *BC = Builder.CreateBitCast(Arg0,
2143 PointerType::getUnqual(Arg1->getType()),
2144 "cast");
2145 StoreInst *SI = Builder.CreateAlignedStore(
2146 Arg1, BC,
2147 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
2148 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2149
2150 // Remove intrinsic.
2151 CI->eraseFromParent();
2152 return;
2153 }
2154
2155 if (IsX86 && Name == "sse2.storel.dq") {
2156 Value *Arg0 = CI->getArgOperand(0);
2157 Value *Arg1 = CI->getArgOperand(1);
2158
2159 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2160 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2161 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2162 Value *BC = Builder.CreateBitCast(Arg0,
2163 PointerType::getUnqual(Elt->getType()),
2164 "cast");
2165 Builder.CreateAlignedStore(Elt, BC, Align(1));
2166
2167 // Remove intrinsic.
2168 CI->eraseFromParent();
2169 return;
2170 }
2171
2172 if (IsX86 && (Name.startswith("sse.storeu.") ||
2173 Name.startswith("sse2.storeu.") ||
2174 Name.startswith("avx.storeu."))) {
2175 Value *Arg0 = CI->getArgOperand(0);
2176 Value *Arg1 = CI->getArgOperand(1);
2177
2178 Arg0 = Builder.CreateBitCast(Arg0,
2179 PointerType::getUnqual(Arg1->getType()),
2180 "cast");
2181 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2182
2183 // Remove intrinsic.
2184 CI->eraseFromParent();
2185 return;
2186 }
2187
2188 if (IsX86 && Name == "avx512.mask.store.ss") {
2189 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2190 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2191 Mask, false);
2192
2193 // Remove intrinsic.
2194 CI->eraseFromParent();
2195 return;
2196 }
2197
2198 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
2199 // "avx512.mask.storeu." or "avx512.mask.store."
2200 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2201 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2202 CI->getArgOperand(2), Aligned);
2203
2204 // Remove intrinsic.
2205 CI->eraseFromParent();
2206 return;
2207 }
2208
2209 Value *Rep;
2210 // Upgrade packed integer vector compare intrinsics to compare instructions.
2211 if (IsX86 && (Name.startswith("sse2.pcmp") ||
2212 Name.startswith("avx2.pcmp"))) {
2213 // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
2214 bool CmpEq = Name[9] == 'e';
2215 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2216 CI->getArgOperand(0), CI->getArgOperand(1));
2217 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2218 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
2219 Type *ExtTy = Type::getInt32Ty(C);
2220 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2221 ExtTy = Type::getInt64Ty(C);
2222 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2223 ExtTy->getPrimitiveSizeInBits();
2224 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2225 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2226 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2227 Name == "sse2.sqrt.sd")) {
2228 Value *Vec = CI->getArgOperand(0);
2229 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2230 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2231 Intrinsic::sqrt, Elt0->getType());
2232 Elt0 = Builder.CreateCall(Intr, Elt0);
2233 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2234 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
2235 Name.startswith("sse2.sqrt.p") ||
2236 Name.startswith("sse.sqrt.p"))) {
2237 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2238 Intrinsic::sqrt,
2239 CI->getType()),
2240 {CI->getArgOperand(0)});
2241 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
2242 if (CI->arg_size() == 4 &&
2243 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2244 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2245 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2246 : Intrinsic::x86_avx512_sqrt_pd_512;
2247
2248 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2249 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
2250 IID), Args);
2251 } else {
2252 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2253 Intrinsic::sqrt,
2254 CI->getType()),
2255 {CI->getArgOperand(0)});
2256 }
2257 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2258 CI->getArgOperand(1));
2259 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
2260 Name.startswith("avx512.ptestnm"))) {
2261 Value *Op0 = CI->getArgOperand(0);
2262 Value *Op1 = CI->getArgOperand(1);
2263 Value *Mask = CI->getArgOperand(2);
2264 Rep = Builder.CreateAnd(Op0, Op1);
2265 llvm::Type *Ty = Op0->getType();
2266 Value *Zero = llvm::Constant::getNullValue(Ty);
2267 ICmpInst::Predicate Pred =
2268 Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2269 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2270 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
2271 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
2272 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2273 ->getNumElements();
2274 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2275 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2276 CI->getArgOperand(1));
2277 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
2278 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2279 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2280 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2281 int Indices[64];
2282 for (unsigned i = 0; i != NumElts; ++i)
2283 Indices[i] = i;
2284
2285 // First extract half of each vector. This gives better codegen than
2286 // doing it in a single shuffle.
2287 LHS =
2288 Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
2289 RHS =
2290 Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
2291 // Concat the vectors.
2292 // NOTE: Operands have to be swapped to match intrinsic definition.
2293 Rep = Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
2294 Rep = Builder.CreateBitCast(Rep, CI->getType());
2295 } else if (IsX86 && Name == "avx512.kand.w") {
2296 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2297 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2298 Rep = Builder.CreateAnd(LHS, RHS);
2299 Rep = Builder.CreateBitCast(Rep, CI->getType());
2300 } else if (IsX86 && Name == "avx512.kandn.w") {
2301 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2302 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2303 LHS = Builder.CreateNot(LHS);
2304 Rep = Builder.CreateAnd(LHS, RHS);
2305 Rep = Builder.CreateBitCast(Rep, CI->getType());
2306 } else if (IsX86 && Name == "avx512.kor.w") {
2307 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2308 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2309 Rep = Builder.CreateOr(LHS, RHS);
2310 Rep = Builder.CreateBitCast(Rep, CI->getType());
2311 } else if (IsX86 && Name == "avx512.kxor.w") {
2312 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2313 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2314 Rep = Builder.CreateXor(LHS, RHS);
2315 Rep = Builder.CreateBitCast(Rep, CI->getType());
2316 } else if (IsX86 && Name == "avx512.kxnor.w") {
2317 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2318 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2319 LHS = Builder.CreateNot(LHS);
2320 Rep = Builder.CreateXor(LHS, RHS);
2321 Rep = Builder.CreateBitCast(Rep, CI->getType());
2322 } else if (IsX86 && Name == "avx512.knot.w") {
2323 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2324 Rep = Builder.CreateNot(Rep);
2325 Rep = Builder.CreateBitCast(Rep, CI->getType());
2326 } else if (IsX86 &&
2327 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2328 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2329 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2330 Rep = Builder.CreateOr(LHS, RHS);
2331 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2332 Value *C;
2333 if (Name[14] == 'c')
2334 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2335 else
2336 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2337 Rep = Builder.CreateICmpEQ(Rep, C);
2338 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2339 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2340 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2341 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2342 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2343 Type *I32Ty = Type::getInt32Ty(C);
2344 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2345 ConstantInt::get(I32Ty, 0));
2346 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2347 ConstantInt::get(I32Ty, 0));
2348 Value *EltOp;
2349 if (Name.contains(".add."))
2350 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2351 else if (Name.contains(".sub."))
2352 EltOp = Builder.CreateFSub(Elt0, Elt1);
2353 else if (Name.contains(".mul."))
2354 EltOp = Builder.CreateFMul(Elt0, Elt1);
2355 else
2356 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2357 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2358 ConstantInt::get(I32Ty, 0));
2359 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
2360 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2361 bool CmpEq = Name[16] == 'e';
2362 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2363 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
2364 Type *OpTy = CI->getArgOperand(0)->getType();
2365 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2366 Intrinsic::ID IID;
2367 switch (VecWidth) {
2368 default: llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 2368)
;
2369 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2370 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2371 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2372 }
2373
2374 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2375 { CI->getOperand(0), CI->getArgOperand(1) });
2376 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2377 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
2378 Type *OpTy = CI->getArgOperand(0)->getType();
2379 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2380 unsigned EltWidth = OpTy->getScalarSizeInBits();
2381 Intrinsic::ID IID;
2382 if (VecWidth == 128 && EltWidth == 32)
2383 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2384 else if (VecWidth == 256 && EltWidth == 32)
2385 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2386 else if (VecWidth == 512 && EltWidth == 32)
2387 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2388 else if (VecWidth == 128 && EltWidth == 64)
2389 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2390 else if (VecWidth == 256 && EltWidth == 64)
2391 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2392 else if (VecWidth == 512 && EltWidth == 64)
2393 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2394 else
2395 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 2395)
;
2396
2397 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2398 { CI->getOperand(0), CI->getArgOperand(1) });
2399 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2400 } else if (IsX86 && Name.startswith("avx512.cmp.p")) {
2401 SmallVector<Value *, 4> Args(CI->args());
2402 Type *OpTy = Args[0]->getType();
2403 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2404 unsigned EltWidth = OpTy->getScalarSizeInBits();
2405 Intrinsic::ID IID;
2406 if (VecWidth == 128 && EltWidth == 32)
2407 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2408 else if (VecWidth == 256 && EltWidth == 32)
2409 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2410 else if (VecWidth == 512 && EltWidth == 32)
2411 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2412 else if (VecWidth == 128 && EltWidth == 64)
2413 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2414 else if (VecWidth == 256 && EltWidth == 64)
2415 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2416 else if (VecWidth == 512 && EltWidth == 64)
2417 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2418 else
2419 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 2419)
;
2420
2421 Value *Mask = Constant::getAllOnesValue(CI->getType());
2422 if (VecWidth == 512)
2423 std::swap(Mask, Args.back());
2424 Args.push_back(Mask);
2425
2426 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2427 Args);
2428 } else if (IsX86 && Name.startswith("avx512.mask.cmp.")) {
2429 // Integer compare intrinsics.
2430 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2431 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2432 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
2433 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2434 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2435 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
2436 Name.startswith("avx512.cvtw2mask.") ||
2437 Name.startswith("avx512.cvtd2mask.") ||
2438 Name.startswith("avx512.cvtq2mask."))) {
2439 Value *Op = CI->getArgOperand(0);
2440 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2441 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2442 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2443 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2444 Name == "ssse3.pabs.w.128" ||
2445 Name == "ssse3.pabs.d.128" ||
2446 Name.startswith("avx2.pabs") ||
2447 Name.startswith("avx512.mask.pabs"))) {
2448 Rep = upgradeAbs(Builder, *CI);
2449 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2450 Name == "sse2.pmaxs.w" ||
2451 Name == "sse41.pmaxsd" ||
2452 Name.startswith("avx2.pmaxs") ||
2453 Name.startswith("avx512.mask.pmaxs"))) {
2454 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2455 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2456 Name == "sse41.pmaxuw" ||
2457 Name == "sse41.pmaxud" ||
2458 Name.startswith("avx2.pmaxu") ||
2459 Name.startswith("avx512.mask.pmaxu"))) {
2460 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2461 } else if (IsX86 && (Name == "sse41.pminsb" ||
2462 Name == "sse2.pmins.w" ||
2463 Name == "sse41.pminsd" ||
2464 Name.startswith("avx2.pmins") ||
2465 Name.startswith("avx512.mask.pmins"))) {
2466 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2467 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2468 Name == "sse41.pminuw" ||
2469 Name == "sse41.pminud" ||
2470 Name.startswith("avx2.pminu") ||
2471 Name.startswith("avx512.mask.pminu"))) {
2472 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2473 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2474 Name == "avx2.pmulu.dq" ||
2475 Name == "avx512.pmulu.dq.512" ||
2476 Name.startswith("avx512.mask.pmulu.dq."))) {
2477 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2478 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2479 Name == "avx2.pmul.dq" ||
2480 Name == "avx512.pmul.dq.512" ||
2481 Name.startswith("avx512.mask.pmul.dq."))) {
2482 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2483 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2484 Name == "sse2.cvtsi2sd" ||
2485 Name == "sse.cvtsi642ss" ||
2486 Name == "sse2.cvtsi642sd")) {
2487 Rep = Builder.CreateSIToFP(
2488 CI->getArgOperand(1),
2489 cast<VectorType>(CI->getType())->getElementType());
2490 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2491 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2492 Rep = Builder.CreateUIToFP(
2493 CI->getArgOperand(1),
2494 cast<VectorType>(CI->getType())->getElementType());
2495 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2496 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2497 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2498 Rep = Builder.CreateFPExt(
2499 Rep, cast<VectorType>(CI->getType())->getElementType());
2500 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2501 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2502 Name == "sse2.cvtdq2ps" ||
2503 Name == "avx.cvtdq2.pd.256" ||
2504 Name == "avx.cvtdq2.ps.256" ||
2505 Name.startswith("avx512.mask.cvtdq2pd.") ||
2506 Name.startswith("avx512.mask.cvtudq2pd.") ||
2507 Name.startswith("avx512.mask.cvtdq2ps.") ||
2508 Name.startswith("avx512.mask.cvtudq2ps.") ||
2509 Name.startswith("avx512.mask.cvtqq2pd.") ||
2510 Name.startswith("avx512.mask.cvtuqq2pd.") ||
2511 Name == "avx512.mask.cvtqq2ps.256" ||
2512 Name == "avx512.mask.cvtqq2ps.512" ||
2513 Name == "avx512.mask.cvtuqq2ps.256" ||
2514 Name == "avx512.mask.cvtuqq2ps.512" ||
2515 Name == "sse2.cvtps2pd" ||
2516 Name == "avx.cvt.ps2.pd.256" ||
2517 Name == "avx512.mask.cvtps2pd.128" ||
2518 Name == "avx512.mask.cvtps2pd.256")) {
2519 auto *DstTy = cast<FixedVectorType>(CI->getType());
2520 Rep = CI->getArgOperand(0);
2521 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2522
2523 unsigned NumDstElts = DstTy->getNumElements();
2524 if (NumDstElts < SrcTy->getNumElements()) {
2525 assert(NumDstElts == 2 && "Unexpected vector size")(static_cast <bool> (NumDstElts == 2 && "Unexpected vector size"
) ? void (0) : __assert_fail ("NumDstElts == 2 && \"Unexpected vector size\""
, "llvm/lib/IR/AutoUpgrade.cpp", 2525, __extension__ __PRETTY_FUNCTION__
))
;
2526 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2527 }
2528
2529 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2530 bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
2531 if (IsPS2PD)
2532 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2533 else if (CI->arg_size() == 4 &&
2534 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2535 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2536 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2537 : Intrinsic::x86_avx512_sitofp_round;
2538 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2539 { DstTy, SrcTy });
2540 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2541 } else {
2542 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2543 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2544 }
2545
2546 if (CI->arg_size() >= 3)
2547 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2548 CI->getArgOperand(1));
2549 } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
2550 Name.startswith("vcvtph2ps."))) {
2551 auto *DstTy = cast<FixedVectorType>(CI->getType());
2552 Rep = CI->getArgOperand(0);
2553 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2554 unsigned NumDstElts = DstTy->getNumElements();
2555 if (NumDstElts != SrcTy->getNumElements()) {
2556 assert(NumDstElts == 4 && "Unexpected vector size")(static_cast <bool> (NumDstElts == 4 && "Unexpected vector size"
) ? void (0) : __assert_fail ("NumDstElts == 4 && \"Unexpected vector size\""
, "llvm/lib/IR/AutoUpgrade.cpp", 2556, __extension__ __PRETTY_FUNCTION__
))
;
2557 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2558 }
2559 Rep = Builder.CreateBitCast(
2560 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2561 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2562 if (CI->arg_size() >= 3)
2563 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2564 CI->getArgOperand(1));
2565 } else if (IsX86 && Name.startswith("avx512.mask.load")) {
2566 // "avx512.mask.loadu." or "avx512.mask.load."
2567 bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
2568 Rep =
2569 UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2570 CI->getArgOperand(2), Aligned);
2571 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
2572 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2573 Type *PtrTy = ResultTy->getElementType();
2574
2575 // Cast the pointer to element type.
2576 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2577 llvm::PointerType::getUnqual(PtrTy));
2578
2579 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2580 ResultTy->getNumElements());
2581
2582 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2583 Intrinsic::masked_expandload,
2584 ResultTy);
2585 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2586 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
2587 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2588 Type *PtrTy = ResultTy->getElementType();
2589
2590 // Cast the pointer to element type.
2591 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2592 llvm::PointerType::getUnqual(PtrTy));
2593
2594 Value *MaskVec =
2595 getX86MaskVec(Builder, CI->getArgOperand(2),
2596 cast<FixedVectorType>(ResultTy)->getNumElements());
2597
2598 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2599 Intrinsic::masked_compressstore,
2600 ResultTy);
2601 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2602 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
2603 Name.startswith("avx512.mask.expand."))) {
2604 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2605
2606 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2607 ResultTy->getNumElements());
2608
2609 bool IsCompress = Name[12] == 'c';
2610 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2611 : Intrinsic::x86_avx512_mask_expand;
2612 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2613 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2614 MaskVec });
2615 } else if (IsX86 && Name.startswith("xop.vpcom")) {
2616 bool IsSigned;
2617 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") ||
2618 Name.endswith("uq"))
2619 IsSigned = false;
2620 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") ||
2621 Name.endswith("q"))
2622 IsSigned = true;
2623 else
2624 llvm_unreachable("Unknown suffix")::llvm::llvm_unreachable_internal("Unknown suffix", "llvm/lib/IR/AutoUpgrade.cpp"
, 2624)
;
2625
2626 unsigned Imm;
2627 if (CI->arg_size() == 3) {
2628 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2629 } else {
2630 Name = Name.substr(9); // strip off "xop.vpcom"
2631 if (Name.startswith("lt"))
2632 Imm = 0;
2633 else if (Name.startswith("le"))
2634 Imm = 1;
2635 else if (Name.startswith("gt"))
2636 Imm = 2;
2637 else if (Name.startswith("ge"))
2638 Imm = 3;
2639 else if (Name.startswith("eq"))
2640 Imm = 4;
2641 else if (Name.startswith("ne"))
2642 Imm = 5;
2643 else if (Name.startswith("false"))
2644 Imm = 6;
2645 else if (Name.startswith("true"))
2646 Imm = 7;
2647 else
2648 llvm_unreachable("Unknown condition")::llvm::llvm_unreachable_internal("Unknown condition", "llvm/lib/IR/AutoUpgrade.cpp"
, 2648)
;
2649 }
2650
2651 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2652 } else if (IsX86 && Name.startswith("xop.vpcmov")) {
2653 Value *Sel = CI->getArgOperand(2);
2654 Value *NotSel = Builder.CreateNot(Sel);
2655 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
2656 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
2657 Rep = Builder.CreateOr(Sel0, Sel1);
2658 } else if (IsX86 && (Name.startswith("xop.vprot") ||
2659 Name.startswith("avx512.prol") ||
2660 Name.startswith("avx512.mask.prol"))) {
2661 Rep = upgradeX86Rotate(Builder, *CI, false);
2662 } else if (IsX86 && (Name.startswith("avx512.pror") ||
2663 Name.startswith("avx512.mask.pror"))) {
2664 Rep = upgradeX86Rotate(Builder, *CI, true);
2665 } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
2666 Name.startswith("avx512.mask.vpshld") ||
2667 Name.startswith("avx512.maskz.vpshld"))) {
2668 bool ZeroMask = Name[11] == 'z';
2669 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
2670 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
2671 Name.startswith("avx512.mask.vpshrd") ||
2672 Name.startswith("avx512.maskz.vpshrd"))) {
2673 bool ZeroMask = Name[11] == 'z';
2674 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
2675 } else if (IsX86 && Name == "sse42.crc32.64.8") {
2676 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
2677 Intrinsic::x86_sse42_crc32_32_8);
2678 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
2679 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
2680 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
2681 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
2682 Name.startswith("avx512.vbroadcast.s"))) {
2683 // Replace broadcasts with a series of insertelements.
2684 auto *VecTy = cast<FixedVectorType>(CI->getType());
2685 Type *EltTy = VecTy->getElementType();
2686 unsigned EltNum = VecTy->getNumElements();
2687 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
2688 EltTy->getPointerTo());
2689 Value *Load = Builder.CreateLoad(EltTy, Cast);
2690 Type *I32Ty = Type::getInt32Ty(C);
2691 Rep = PoisonValue::get(VecTy);
2692 for (unsigned I = 0; I < EltNum; ++I)
2693 Rep = Builder.CreateInsertElement(Rep, Load,
2694 ConstantInt::get(I32Ty, I));
2695 } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
2696 Name.startswith("sse41.pmovzx") ||
2697 Name.startswith("avx2.pmovsx") ||
2698 Name.startswith("avx2.pmovzx") ||
2699 Name.startswith("avx512.mask.pmovsx") ||
2700 Name.startswith("avx512.mask.pmovzx"))) {
2701 auto *DstTy = cast<FixedVectorType>(CI->getType());
2702 unsigned NumDstElts = DstTy->getNumElements();
2703
2704 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
2705 SmallVector<int, 8> ShuffleMask(NumDstElts);
2706 for (unsigned i = 0; i != NumDstElts; ++i)
2707 ShuffleMask[i] = i;
2708
2709 Value *SV =
2710 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
2711
2712 bool DoSext = (StringRef::npos != Name.find("pmovsx"));
2713 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
2714 : Builder.CreateZExt(SV, DstTy);
2715 // If there are 3 arguments, it's a masked intrinsic so we need a select.
2716 if (CI->arg_size() == 3)
2717 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2718 CI->getArgOperand(1));
2719 } else if (Name == "avx512.mask.pmov.qd.256" ||
2720 Name == "avx512.mask.pmov.qd.512" ||
2721 Name == "avx512.mask.pmov.wb.256" ||
2722 Name == "avx512.mask.pmov.wb.512") {
2723 Type *Ty = CI->getArgOperand(1)->getType();
2724 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
2725 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2726 CI->getArgOperand(1));
2727 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
2728 Name == "avx2.vbroadcasti128")) {
2729 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
2730 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
2731 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
2732 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
2733 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
2734 PointerType::getUnqual(VT));
2735 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
2736 if (NumSrcElts == 2)
2737 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
2738 else
2739 Rep = Builder.CreateShuffleVector(
2740 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
2741 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
2742 Name.startswith("avx512.mask.shuf.f"))) {
2743 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2744 Type *VT = CI->getType();
2745 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
2746 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
2747 unsigned ControlBitsMask = NumLanes - 1;
2748 unsigned NumControlBits = NumLanes / 2;
2749 SmallVector<int, 8> ShuffleMask(0);
2750
2751 for (unsigned l = 0; l != NumLanes; ++l) {
2752 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
2753 // We actually need the other source.
2754 if (l >= NumLanes / 2)
2755 LaneMask += NumLanes;
2756 for (unsigned i = 0; i != NumElementsInLane; ++i)
2757 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
2758 }
2759 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2760 CI->getArgOperand(1), ShuffleMask);
2761 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2762 CI->getArgOperand(3));
2763 }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
2764 Name.startswith("avx512.mask.broadcasti"))) {
2765 unsigned NumSrcElts =
2766 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
2767 ->getNumElements();
2768 unsigned NumDstElts =
2769 cast<FixedVectorType>(CI->getType())->getNumElements();
2770
2771 SmallVector<int, 8> ShuffleMask(NumDstElts);
2772 for (unsigned i = 0; i != NumDstElts; ++i)
2773 ShuffleMask[i] = i % NumSrcElts;
2774
2775 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2776 CI->getArgOperand(0),
2777 ShuffleMask);
2778 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2779 CI->getArgOperand(1));
2780 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
2781 Name.startswith("avx2.vbroadcast") ||
2782 Name.startswith("avx512.pbroadcast") ||
2783 Name.startswith("avx512.mask.broadcast.s"))) {
2784 // Replace vp?broadcasts with a vector shuffle.
2785 Value *Op = CI->getArgOperand(0);
2786 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
2787 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
2788 SmallVector<int, 8> M;
2789 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
2790 Rep = Builder.CreateShuffleVector(Op, M);
2791
2792 if (CI->arg_size() == 3)
2793 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2794 CI->getArgOperand(1));
2795 } else if (IsX86 && (Name.startswith("sse2.padds.") ||
2796 Name.startswith("avx2.padds.") ||
2797 Name.startswith("avx512.padds.") ||
2798 Name.startswith("avx512.mask.padds."))) {
2799 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
2800 } else if (IsX86 && (Name.startswith("sse2.psubs.") ||
2801 Name.startswith("avx2.psubs.") ||
2802 Name.startswith("avx512.psubs.") ||
2803 Name.startswith("avx512.mask.psubs."))) {
2804 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
2805 } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
2806 Name.startswith("avx2.paddus.") ||
2807 Name.startswith("avx512.mask.paddus."))) {
2808 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
2809 } else if (IsX86 && (Name.startswith("sse2.psubus.") ||
2810 Name.startswith("avx2.psubus.") ||
2811 Name.startswith("avx512.mask.psubus."))) {
2812 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
2813 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
2814 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2815 CI->getArgOperand(1),
2816 CI->getArgOperand(2),
2817 CI->getArgOperand(3),
2818 CI->getArgOperand(4),
2819 false);
2820 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
2821 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2822 CI->getArgOperand(1),
2823 CI->getArgOperand(2),
2824 CI->getArgOperand(3),
2825 CI->getArgOperand(4),
2826 true);
2827 } else if (IsX86 && (Name == "sse2.psll.dq" ||
2828 Name == "avx2.psll.dq")) {
2829 // 128/256-bit shift left specified in bits.
2830 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2831 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
2832 Shift / 8); // Shift is in bits.
2833 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
2834 Name == "avx2.psrl.dq")) {
2835 // 128/256-bit shift right specified in bits.
2836 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2837 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
2838 Shift / 8); // Shift is in bits.
2839 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
2840 Name == "avx2.psll.dq.bs" ||
2841 Name == "avx512.psll.dq.512")) {
2842 // 128/256/512-bit shift left specified in bytes.
2843 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2844 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2845 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
2846 Name == "avx2.psrl.dq.bs" ||
2847 Name == "avx512.psrl.dq.512")) {
2848 // 128/256/512-bit shift right specified in bytes.
2849 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2850 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2851 } else if (IsX86 && (Name == "sse41.pblendw" ||
2852 Name.startswith("sse41.blendp") ||
2853 Name.startswith("avx.blend.p") ||
2854 Name == "avx2.pblendw" ||
2855 Name.startswith("avx2.pblendd."))) {
2856 Value *Op0 = CI->getArgOperand(0);
2857 Value *Op1 = CI->getArgOperand(1);
2858 unsigned Imm = cast <ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2859 auto *VecTy = cast<FixedVectorType>(CI->getType());
2860 unsigned NumElts = VecTy->getNumElements();
2861
2862 SmallVector<int, 16> Idxs(NumElts);
2863 for (unsigned i = 0; i != NumElts; ++i)
2864 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
2865
2866 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2867 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2868 Name == "avx2.vinserti128" ||
2869 Name.startswith("avx512.mask.insert"))) {
2870 Value *Op0 = CI->getArgOperand(0);
2871 Value *Op1 = CI->getArgOperand(1);
2872 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2873 unsigned DstNumElts =
2874 cast<FixedVectorType>(CI->getType())->getNumElements();
2875 unsigned SrcNumElts =
2876 cast<FixedVectorType>(Op1->getType())->getNumElements();
2877 unsigned Scale = DstNumElts / SrcNumElts;
2878
2879 // Mask off the high bits of the immediate value; hardware ignores those.
2880 Imm = Imm % Scale;
2881
2882 // Extend the second operand into a vector the size of the destination.
2883 SmallVector<int, 8> Idxs(DstNumElts);
2884 for (unsigned i = 0; i != SrcNumElts; ++i)
2885 Idxs[i] = i;
2886 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2887 Idxs[i] = SrcNumElts;
2888 Rep = Builder.CreateShuffleVector(Op1, Idxs);
2889
2890 // Insert the second operand into the first operand.
2891
2892 // Note that there is no guarantee that instruction lowering will actually
2893 // produce a vinsertf128 instruction for the created shuffles. In
2894 // particular, the 0 immediate case involves no lane changes, so it can
2895 // be handled as a blend.
2896
2897 // Example of shuffle mask for 32-bit elements:
2898 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2899 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
2900
2901 // First fill with identify mask.
2902 for (unsigned i = 0; i != DstNumElts; ++i)
2903 Idxs[i] = i;
2904 // Then replace the elements where we need to insert.
2905 for (unsigned i = 0; i != SrcNumElts; ++i)
2906 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2907 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2908
2909 // If the intrinsic has a mask operand, handle that.
2910 if (CI->arg_size() == 5)
2911 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2912 CI->getArgOperand(3));
2913 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2914 Name == "avx2.vextracti128" ||
2915 Name.startswith("avx512.mask.vextract"))) {
2916 Value *Op0 = CI->getArgOperand(0);
2917 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2918 unsigned DstNumElts =
2919 cast<FixedVectorType>(CI->getType())->getNumElements();
2920 unsigned SrcNumElts =
2921 cast<FixedVectorType>(Op0->getType())->getNumElements();
2922 unsigned Scale = SrcNumElts / DstNumElts;
2923
2924 // Mask off the high bits of the immediate value; hardware ignores those.
2925 Imm = Imm % Scale;
2926
2927 // Get indexes for the subvector of the input vector.
2928 SmallVector<int, 8> Idxs(DstNumElts);
2929 for (unsigned i = 0; i != DstNumElts; ++i) {
2930 Idxs[i] = i + (Imm * DstNumElts);
2931 }
2932 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2933
2934 // If the intrinsic has a mask operand, handle that.
2935 if (CI->arg_size() == 4)
2936 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2937 CI->getArgOperand(2));
2938 } else if (!IsX86 && Name == "stackprotectorcheck") {
2939 Rep = nullptr;
2940 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
2941 Name.startswith("avx512.mask.perm.di."))) {
2942 Value *Op0 = CI->getArgOperand(0);
2943 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2944 auto *VecTy = cast<FixedVectorType>(CI->getType());
2945 unsigned NumElts = VecTy->getNumElements();
2946
2947 SmallVector<int, 8> Idxs(NumElts);
2948 for (unsigned i = 0; i != NumElts; ++i)
2949 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
2950
2951 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2952
2953 if (CI->arg_size() == 4)
2954 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2955 CI->getArgOperand(2));
2956 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
2957 Name == "avx2.vperm2i128")) {
2958 // The immediate permute control byte looks like this:
2959 // [1:0] - select 128 bits from sources for low half of destination
2960 // [2] - ignore
2961 // [3] - zero low half of destination
2962 // [5:4] - select 128 bits from sources for high half of destination
2963 // [6] - ignore
2964 // [7] - zero high half of destination
2965
2966 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2967
2968 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2969 unsigned HalfSize = NumElts / 2;
2970 SmallVector<int, 8> ShuffleMask(NumElts);
2971
2972 // Determine which operand(s) are actually in use for this instruction.
2973 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2974 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2975
2976 // If needed, replace operands based on zero mask.
2977 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
2978 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
2979
2980 // Permute low half of result.
2981 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
2982 for (unsigned i = 0; i < HalfSize; ++i)
2983 ShuffleMask[i] = StartIndex + i;
2984
2985 // Permute high half of result.
2986 StartIndex = (Imm & 0x10) ? HalfSize : 0;
2987 for (unsigned i = 0; i < HalfSize; ++i)
2988 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
2989
2990 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2991
2992 } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
2993 Name == "sse2.pshuf.d" ||
2994 Name.startswith("avx512.mask.vpermil.p") ||
2995 Name.startswith("avx512.mask.pshuf.d."))) {
2996 Value *Op0 = CI->getArgOperand(0);
2997 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2998 auto *VecTy = cast<FixedVectorType>(CI->getType());
2999 unsigned NumElts = VecTy->getNumElements();
3000 // Calculate the size of each index in the immediate.
3001 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
3002 unsigned IdxMask = ((1 << IdxSize) - 1);
3003
3004 SmallVector<int, 8> Idxs(NumElts);
3005 // Lookup the bits for this element, wrapping around the immediate every
3006 // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need
3007 // to offset by the first index of each group.
3008 for (unsigned i = 0; i != NumElts; ++i)
3009 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
3010
3011 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3012
3013 if (CI->arg_size() == 4)
3014 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3015 CI->getArgOperand(2));
3016 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
3017 Name.startswith("avx512.mask.pshufl.w."))) {
3018 Value *Op0 = CI->getArgOperand(0);
3019 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3020 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3021
3022 SmallVector<int, 16> Idxs(NumElts);
3023 for (unsigned l = 0; l != NumElts; l += 8) {
3024 for (unsigned i = 0; i != 4; ++i)
3025 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
3026 for (unsigned i = 4; i != 8; ++i)
3027 Idxs[i + l] = i + l;
3028 }
3029
3030 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3031
3032 if (CI->arg_size() == 4)
3033 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3034 CI->getArgOperand(2));
3035 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
3036 Name.startswith("avx512.mask.pshufh.w."))) {
3037 Value *Op0 = CI->getArgOperand(0);
3038 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3039 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3040
3041 SmallVector<int, 16> Idxs(NumElts);
3042 for (unsigned l = 0; l != NumElts; l += 8) {
3043 for (unsigned i = 0; i != 4; ++i)
3044 Idxs[i + l] = i + l;
3045 for (unsigned i = 0; i != 4; ++i)
3046 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
3047 }
3048
3049 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3050
3051 if (CI->arg_size() == 4)
3052 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3053 CI->getArgOperand(2));
3054 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
3055 Value *Op0 = CI->getArgOperand(0);
3056 Value *Op1 = CI->getArgOperand(1);
3057 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3058 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3059
3060 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3061 unsigned HalfLaneElts = NumLaneElts / 2;
3062
3063 SmallVector<int, 16> Idxs(NumElts);
3064 for (unsigned i = 0; i != NumElts; ++i) {
3065 // Base index is the starting element of the lane.
3066 Idxs[i] = i - (i % NumLaneElts);
3067 // If we are half way through the lane switch to the other source.
3068 if ((i % NumLaneElts) >= HalfLaneElts)
3069 Idxs[i] += NumElts;
3070 // Now select the specific element. By adding HalfLaneElts bits from
3071 // the immediate. Wrapping around the immediate every 8-bits.
3072 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
3073 }
3074
3075 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3076
3077 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
3078 CI->getArgOperand(3));
3079 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
3080 Name.startswith("avx512.mask.movshdup") ||
3081 Name.startswith("avx512.mask.movsldup"))) {
3082 Value *Op0 = CI->getArgOperand(0);
3083 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3084 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3085
3086 unsigned Offset = 0;
3087 if (Name.startswith("avx512.mask.movshdup."))
3088 Offset = 1;
3089
3090 SmallVector<int, 16> Idxs(NumElts);
3091 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
3092 for (unsigned i = 0; i != NumLaneElts; i += 2) {
3093 Idxs[i + l + 0] = i + l + Offset;
3094 Idxs[i + l + 1] = i + l + Offset;
3095 }
3096
3097 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3098
3099 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3100 CI->getArgOperand(1));
3101 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
3102 Name.startswith("avx512.mask.unpckl."))) {
3103 Value *Op0 = CI->getArgOperand(0);
3104 Value *Op1 = CI->getArgOperand(1);
3105 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3106 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3107
3108 SmallVector<int, 64> Idxs(NumElts);
3109 for (int l = 0; l != NumElts; l += NumLaneElts)
3110 for (int i = 0; i != NumLaneElts; ++i)
3111 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
3112
3113 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3114
3115 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3116 CI->getArgOperand(2));
3117 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
3118 Name.startswith("avx512.mask.unpckh."))) {
3119 Value *Op0 = CI->getArgOperand(0);
3120 Value *Op1 = CI->getArgOperand(1);
3121 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3122 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3123
3124 SmallVector<int, 64> Idxs(NumElts);
3125 for (int l = 0; l != NumElts; l += NumLaneElts)
3126 for (int i = 0; i != NumLaneElts; ++i)
3127 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
3128
3129 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3130
3131 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3132 CI->getArgOperand(2));
3133 } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
3134 Name.startswith("avx512.mask.pand."))) {
3135 VectorType *FTy = cast<VectorType>(CI->getType());
3136 VectorType *ITy = VectorType::getInteger(FTy);
3137 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3138 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3139 Rep = Builder.CreateBitCast(Rep, FTy);
3140 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3141 CI->getArgOperand(2));
3142 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
3143 Name.startswith("avx512.mask.pandn."))) {
3144 VectorType *FTy = cast<VectorType>(CI->getType());
3145 VectorType *ITy = VectorType::getInteger(FTy);
3146 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
3147 Rep = Builder.CreateAnd(Rep,
3148 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3149 Rep = Builder.CreateBitCast(Rep, FTy);
3150 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3151 CI->getArgOperand(2));
3152 } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
3153 Name.startswith("avx512.mask.por."))) {
3154 VectorType *FTy = cast<VectorType>(CI->getType());
3155 VectorType *ITy = VectorType::getInteger(FTy);
3156 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3157 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3158 Rep = Builder.CreateBitCast(Rep, FTy);
3159 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3160 CI->getArgOperand(2));
3161 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
3162 Name.startswith("avx512.mask.pxor."))) {
3163 VectorType *FTy = cast<VectorType>(CI->getType());
3164 VectorType *ITy = VectorType::getInteger(FTy);
3165 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3166 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3167 Rep = Builder.CreateBitCast(Rep, FTy);
3168 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3169 CI->getArgOperand(2));
3170 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
3171 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3172 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3173 CI->getArgOperand(2));
3174 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
3175 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3176 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3177 CI->getArgOperand(2));
3178 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
3179 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3180 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3181 CI->getArgOperand(2));
3182 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
3183 if (Name.endswith(".512")) {
3184 Intrinsic::ID IID;
3185 if (Name[17] == 's')
3186 IID = Intrinsic::x86_avx512_add_ps_512;
3187 else
3188 IID = Intrinsic::x86_avx512_add_pd_512;
3189
3190 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3191 { CI->getArgOperand(0), CI->getArgOperand(1),
3192 CI->getArgOperand(4) });
3193 } else {
3194 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3195 }
3196 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3197 CI->getArgOperand(2));
3198 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
3199 if (Name.endswith(".512")) {
3200 Intrinsic::ID IID;
3201 if (Name[17] == 's')
3202 IID = Intrinsic::x86_avx512_div_ps_512;
3203 else
3204 IID = Intrinsic::x86_avx512_div_pd_512;
3205
3206 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3207 { CI->getArgOperand(0), CI->getArgOperand(1),
3208 CI->getArgOperand(4) });
3209 } else {
3210 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3211 }
3212 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3213 CI->getArgOperand(2));
3214 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
3215 if (Name.endswith(".512")) {
3216 Intrinsic::ID IID;
3217 if (Name[17] == 's')
3218 IID = Intrinsic::x86_avx512_mul_ps_512;
3219 else
3220 IID = Intrinsic::x86_avx512_mul_pd_512;
3221
3222 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3223 { CI->getArgOperand(0), CI->getArgOperand(1),
3224 CI->getArgOperand(4) });
3225 } else {
3226 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3227 }
3228 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3229 CI->getArgOperand(2));
3230 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
3231 if (Name.endswith(".512")) {
3232 Intrinsic::ID IID;
3233 if (Name[17] == 's')
3234 IID = Intrinsic::x86_avx512_sub_ps_512;
3235 else
3236 IID = Intrinsic::x86_avx512_sub_pd_512;
3237
3238 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3239 { CI->getArgOperand(0), CI->getArgOperand(1),
3240 CI->getArgOperand(4) });
3241 } else {
3242 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3243 }
3244 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3245 CI->getArgOperand(2));
3246 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
3247 Name.startswith("avx512.mask.min.p")) &&
3248 Name.drop_front(18) == ".512") {
3249 bool IsDouble = Name[17] == 'd';
3250 bool IsMin = Name[13] == 'i';
3251 static const Intrinsic::ID MinMaxTbl[2][2] = {
3252 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3253 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3254 };
3255 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3256
3257 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3258 { CI->getArgOperand(0), CI->getArgOperand(1),
3259 CI->getArgOperand(4) });
3260 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3261 CI->getArgOperand(2));
3262 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
3263 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3264 Intrinsic::ctlz,
3265 CI->getType()),
3266 { CI->getArgOperand(0), Builder.getInt1(false) });
3267 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3268 CI->getArgOperand(1));
3269 } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
3270 bool IsImmediate = Name[16] == 'i' ||
3271 (Name.size() > 18 && Name[18] == 'i');
3272 bool IsVariable = Name[16] == 'v';
3273 char Size = Name[16] == '.' ? Name[17] :
3274 Name[17] == '.' ? Name[18] :
3275 Name[18] == '.' ? Name[19] :
3276 Name[20];
3277
3278 Intrinsic::ID IID;
3279 if (IsVariable && Name[17] != '.') {
3280 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3281 IID = Intrinsic::x86_avx2_psllv_q;
3282 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3283 IID = Intrinsic::x86_avx2_psllv_q_256;
3284 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3285 IID = Intrinsic::x86_avx2_psllv_d;
3286 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3287 IID = Intrinsic::x86_avx2_psllv_d_256;
3288 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3289 IID = Intrinsic::x86_avx512_psllv_w_128;
3290 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3291 IID = Intrinsic::x86_avx512_psllv_w_256;
3292 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3293 IID = Intrinsic::x86_avx512_psllv_w_512;
3294 else
3295 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3295)
;
3296 } else if (Name.endswith(".128")) {
3297 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3298 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3299 : Intrinsic::x86_sse2_psll_d;
3300 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3301 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3302 : Intrinsic::x86_sse2_psll_q;
3303 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3304 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3305 : Intrinsic::x86_sse2_psll_w;
3306 else
3307 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3307)
;
3308 } else if (Name.endswith(".256")) {
3309 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3310 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3311 : Intrinsic::x86_avx2_psll_d;
3312 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3313 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3314 : Intrinsic::x86_avx2_psll_q;
3315 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3316 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3317 : Intrinsic::x86_avx2_psll_w;
3318 else
3319 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3319)
;
3320 } else {
3321 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3322 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3323 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3324 Intrinsic::x86_avx512_psll_d_512;
3325 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3326 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3327 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3328 Intrinsic::x86_avx512_psll_q_512;
3329 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3330 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3331 : Intrinsic::x86_avx512_psll_w_512;
3332 else
3333 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3333)
;
3334 }
3335
3336 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3337 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
3338 bool IsImmediate = Name[16] == 'i' ||
3339 (Name.size() > 18 && Name[18] == 'i');
3340 bool IsVariable = Name[16] == 'v';
3341 char Size = Name[16] == '.' ? Name[17] :
3342 Name[17] == '.' ? Name[18] :
3343 Name[18] == '.' ? Name[19] :
3344 Name[20];
3345
3346 Intrinsic::ID IID;
3347 if (IsVariable && Name[17] != '.') {
3348 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3349 IID = Intrinsic::x86_avx2_psrlv_q;
3350 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3351 IID = Intrinsic::x86_avx2_psrlv_q_256;
3352 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3353 IID = Intrinsic::x86_avx2_psrlv_d;
3354 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3355 IID = Intrinsic::x86_avx2_psrlv_d_256;
3356 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3357 IID = Intrinsic::x86_avx512_psrlv_w_128;
3358 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3359 IID = Intrinsic::x86_avx512_psrlv_w_256;
3360 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3361 IID = Intrinsic::x86_avx512_psrlv_w_512;
3362 else
3363 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3363)
;
3364 } else if (Name.endswith(".128")) {
3365 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3366 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3367 : Intrinsic::x86_sse2_psrl_d;
3368 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3369 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3370 : Intrinsic::x86_sse2_psrl_q;
3371 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3372 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3373 : Intrinsic::x86_sse2_psrl_w;
3374 else
3375 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3375)
;
3376 } else if (Name.endswith(".256")) {
3377 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3378 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3379 : Intrinsic::x86_avx2_psrl_d;
3380 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3381 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3382 : Intrinsic::x86_avx2_psrl_q;
3383 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3384 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3385 : Intrinsic::x86_avx2_psrl_w;
3386 else
3387 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3387)
;
3388 } else {
3389 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
3390 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3391 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3392 Intrinsic::x86_avx512_psrl_d_512;
3393 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
3394 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3395 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3396 Intrinsic::x86_avx512_psrl_q_512;
3397 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w)
3398 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3399 : Intrinsic::x86_avx512_psrl_w_512;
3400 else
3401 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3401)
;
3402 }
3403
3404 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3405 } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
3406 bool IsImmediate = Name[16] == 'i' ||
3407 (Name.size() > 18 && Name[18] == 'i');
3408 bool IsVariable = Name[16] == 'v';
3409 char Size = Name[16] == '.' ? Name[17] :
3410 Name[17] == '.' ? Name[18] :
3411 Name[18] == '.' ? Name[19] :
3412 Name[20];
3413
3414 Intrinsic::ID IID;
3415 if (IsVariable && Name[17] != '.') {
3416 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3417 IID = Intrinsic::x86_avx2_psrav_d;
3418 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3419 IID = Intrinsic::x86_avx2_psrav_d_256;
3420 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3421 IID = Intrinsic::x86_avx512_psrav_w_128;
3422 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3423 IID = Intrinsic::x86_avx512_psrav_w_256;
3424 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3425 IID = Intrinsic::x86_avx512_psrav_w_512;
3426 else
3427 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3427)
;
3428 } else if (Name.endswith(".128")) {
3429 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3430 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3431 : Intrinsic::x86_sse2_psra_d;
3432 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3433 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3434 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3435 Intrinsic::x86_avx512_psra_q_128;
3436 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3437 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3438 : Intrinsic::x86_sse2_psra_w;
3439 else
3440 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3440)
;
3441 } else if (Name.endswith(".256")) {
3442 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3443 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3444 : Intrinsic::x86_avx2_psra_d;
3445 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3446 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3447 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3448 Intrinsic::x86_avx512_psra_q_256;
3449 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3450 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3451 : Intrinsic::x86_avx2_psra_w;
3452 else
3453 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3453)
;
3454 } else {
3455 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3456 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3457 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3458 Intrinsic::x86_avx512_psra_d_512;
3459 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
3460 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3461 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3462 Intrinsic::x86_avx512_psra_q_512;
3463 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3464 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3465 : Intrinsic::x86_avx512_psra_w_512;
3466 else
3467 llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "llvm/lib/IR/AutoUpgrade.cpp"
, 3467)
;
3468 }
3469
3470 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3471 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
3472 Rep = upgradeMaskedMove(Builder, *CI);
3473 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
3474 Rep = UpgradeMaskToInt(Builder, *CI);
3475 } else if (IsX86 && Name.endswith(".movntdqa")) {
3476 Module *M = F->getParent();
3477 MDNode *Node = MDNode::get(
3478 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3479
3480 Value *Ptr = CI->getArgOperand(0);
3481
3482 // Convert the type of the pointer to a pointer to the stored type.
3483 Value *BC = Builder.CreateBitCast(
3484 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3485 LoadInst *LI = Builder.CreateAlignedLoad(
3486 CI->getType(), BC,
3487 Align(CI->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
3488 LI->setMetadata(M->getMDKindID("nontemporal"), Node);
3489 Rep = LI;
3490 } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
3491 Name.startswith("fma.vfmsub.") ||
3492 Name.startswith("fma.vfnmadd.") ||
3493 Name.startswith("fma.vfnmsub."))) {
3494 bool NegMul = Name[6] == 'n';
3495 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3496 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
3497
3498 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3499 CI->getArgOperand(2) };
3500
3501 if (IsScalar) {
3502 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3503 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3504 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3505 }
3506
3507 if (NegMul && !IsScalar)
3508 Ops[0] = Builder.CreateFNeg(Ops[0]);
3509 if (NegMul && IsScalar)
3510 Ops[1] = Builder.CreateFNeg(Ops[1]);
3511 if (NegAcc)
3512 Ops[2] = Builder.CreateFNeg(Ops[2]);
3513
3514 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3515 Intrinsic::fma,
3516 Ops[0]->getType()),
3517 Ops);
3518
3519 if (IsScalar)
3520 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3521 (uint64_t)0);
3522 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
3523 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3524 CI->getArgOperand(2) };
3525
3526 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3527 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3528 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3529
3530 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3531 Intrinsic::fma,
3532 Ops[0]->getType()),
3533 Ops);
3534
3535 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3536 Rep, (uint64_t)0);
3537 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
3538 Name.startswith("avx512.maskz.vfmadd.s") ||
3539 Name.startswith("avx512.mask3.vfmadd.s") ||
3540 Name.startswith("avx512.mask3.vfmsub.s") ||
3541 Name.startswith("avx512.mask3.vfnmsub.s"))) {
3542 bool IsMask3 = Name[11] == '3';
3543 bool IsMaskZ = Name[11] == 'z';
3544 // Drop the "avx512.mask." to make it easier.
3545 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3546 bool NegMul = Name[2] == 'n';
3547 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3548
3549 Value *A = CI->getArgOperand(0);
3550 Value *B = CI->getArgOperand(1);
3551 Value *C = CI->getArgOperand(2);
3552
3553 if (NegMul && (IsMask3 || IsMaskZ))
3554 A = Builder.CreateFNeg(A);
3555 if (NegMul && !(IsMask3 || IsMaskZ))
3556 B = Builder.CreateFNeg(B);
3557 if (NegAcc)
3558 C = Builder.CreateFNeg(C);
3559
3560 A = Builder.CreateExtractElement(A, (uint64_t)0);
3561 B = Builder.CreateExtractElement(B, (uint64_t)0);
3562 C = Builder.CreateExtractElement(C, (uint64_t)0);
3563
3564 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3565 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3566 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3567
3568 Intrinsic::ID IID;
3569 if (Name.back() == 'd')
3570 IID = Intrinsic::x86_avx512_vfmadd_f64;
3571 else
3572 IID = Intrinsic::x86_avx512_vfmadd_f32;
3573 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3574 Rep = Builder.CreateCall(FMA, Ops);
3575 } else {
3576 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3577 Intrinsic::fma,
3578 A->getType());
3579 Rep = Builder.CreateCall(FMA, { A, B, C });
3580 }
3581
3582 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3583 IsMask3 ? C : A;
3584
3585 // For Mask3 with NegAcc, we need to create a new extractelement that
3586 // avoids the negation above.
3587 if (NegAcc && IsMask3)
3588 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3589 (uint64_t)0);
3590
3591 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
3592 Rep, PassThru);
3593 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3594 Rep, (uint64_t)0);
3595 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") ||
3596 Name.startswith("avx512.mask.vfnmadd.p") ||
3597 Name.startswith("avx512.mask.vfnmsub.p") ||
3598 Name.startswith("avx512.mask3.vfmadd.p") ||
3599 Name.startswith("avx512.mask3.vfmsub.p") ||
3600 Name.startswith("avx512.mask3.vfnmsub.p") ||
3601 Name.startswith("avx512.maskz.vfmadd.p"))) {
3602 bool IsMask3 = Name[11] == '3';
3603 bool IsMaskZ = Name[11] == 'z';
3604 // Drop the "avx512.mask." to make it easier.
3605 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3606 bool NegMul = Name[2] == 'n';
3607 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3608
3609 Value *A = CI->getArgOperand(0);
3610 Value *B = CI->getArgOperand(1);
3611 Value *C = CI->getArgOperand(2);
3612
3613 if (NegMul && (IsMask3 || IsMaskZ))
3614 A = Builder.CreateFNeg(A);
3615 if (NegMul && !(IsMask3 || IsMaskZ))
3616 B = Builder.CreateFNeg(B);
3617 if (NegAcc)
3618 C = Builder.CreateFNeg(C);
3619
3620 if (CI->arg_size() == 5 &&
3621 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3622 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3623 Intrinsic::ID IID;
3624 // Check the character before ".512" in string.
3625 if (Name[Name.size()-5] == 's')
3626 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3627 else
3628 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3629
3630 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3631 { A, B, C, CI->getArgOperand(4) });
3632 } else {
3633 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3634 Intrinsic::fma,
3635 A->getType());
3636 Rep = Builder.CreateCall(FMA, { A, B, C });
3637 }
3638
3639 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3640 IsMask3 ? CI->getArgOperand(2) :
3641 CI->getArgOperand(0);
3642
3643 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3644 } else if (IsX86 && Name.startswith("fma.vfmsubadd.p")) {
3645 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3646 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3647 Intrinsic::ID IID;
3648 if (VecWidth == 128 && EltWidth == 32)
3649 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3650 else if (VecWidth == 256 && EltWidth == 32)
3651 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3652 else if (VecWidth == 128 && EltWidth == 64)
3653 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3654 else if (VecWidth == 256 && EltWidth == 64)
3655 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3656 else
3657 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3657)
;
3658
3659 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3660 CI->getArgOperand(2) };
3661 Ops[2] = Builder.CreateFNeg(Ops[2]);
3662 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3663 Ops);
3664 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
3665 Name.startswith("avx512.mask3.vfmaddsub.p") ||
3666 Name.startswith("avx512.maskz.vfmaddsub.p") ||
3667 Name.startswith("avx512.mask3.vfmsubadd.p"))) {
3668 bool IsMask3 = Name[11] == '3';
3669 bool IsMaskZ = Name[11] == 'z';
3670 // Drop the "avx512.mask." to make it easier.
3671 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3672 bool IsSubAdd = Name[3] == 's';
3673 if (CI->arg_size() == 5) {
3674 Intrinsic::ID IID;
3675 // Check the character before ".512" in string.
3676 if (Name[Name.size()-5] == 's')
3677 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
3678 else
3679 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
3680
3681 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3682 CI->getArgOperand(2), CI->getArgOperand(4) };
3683 if (IsSubAdd)
3684 Ops[2] = Builder.CreateFNeg(Ops[2]);
3685
3686 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3687 Ops);
3688 } else {
3689 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3690
3691 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3692 CI->getArgOperand(2) };
3693
3694 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
3695 Ops[0]->getType());
3696 Value *Odd = Builder.CreateCall(FMA, Ops);
3697 Ops[2] = Builder.CreateFNeg(Ops[2]);
3698 Value *Even = Builder.CreateCall(FMA, Ops);
3699
3700 if (IsSubAdd)
3701 std::swap(Even, Odd);
3702
3703 SmallVector<int, 32> Idxs(NumElts);
3704 for (int i = 0; i != NumElts; ++i)
3705 Idxs[i] = i + (i % 2) * NumElts;
3706
3707 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
3708 }
3709
3710 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3711 IsMask3 ? CI->getArgOperand(2) :
3712 CI->getArgOperand(0);
3713
3714 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3715 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
3716 Name.startswith("avx512.maskz.pternlog."))) {
3717 bool ZeroMask = Name[11] == 'z';
3718 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3719 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3720 Intrinsic::ID IID;
3721 if (VecWidth == 128 && EltWidth == 32)
3722 IID = Intrinsic::x86_avx512_pternlog_d_128;
3723 else if (VecWidth == 256 && EltWidth == 32)
3724 IID = Intrinsic::x86_avx512_pternlog_d_256;
3725 else if (VecWidth == 512 && EltWidth == 32)
3726 IID = Intrinsic::x86_avx512_pternlog_d_512;
3727 else if (VecWidth == 128 && EltWidth == 64)
3728 IID = Intrinsic::x86_avx512_pternlog_q_128;
3729 else if (VecWidth == 256 && EltWidth == 64)
3730 IID = Intrinsic::x86_avx512_pternlog_q_256;
3731 else if (VecWidth == 512 && EltWidth == 64)
3732 IID = Intrinsic::x86_avx512_pternlog_q_512;
3733 else
3734 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3734)
;
3735
3736 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3737 CI->getArgOperand(2), CI->getArgOperand(3) };
3738 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3739 Args);
3740 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3741 : CI->getArgOperand(0);
3742 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
3743 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
3744 Name.startswith("avx512.maskz.vpmadd52"))) {
3745 bool ZeroMask = Name[11] == 'z';
3746 bool High = Name[20] == 'h' || Name[21] == 'h';
3747 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3748 Intrinsic::ID IID;
3749 if (VecWidth == 128 && !High)
3750 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
3751 else if (VecWidth == 256 && !High)
3752 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
3753 else if (VecWidth == 512 && !High)
3754 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
3755 else if (VecWidth == 128 && High)
3756 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
3757 else if (VecWidth == 256 && High)
3758 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
3759 else if (VecWidth == 512 && High)
3760 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
3761 else
3762 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3762)
;
3763
3764 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3765 CI->getArgOperand(2) };
3766 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3767 Args);
3768 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3769 : CI->getArgOperand(0);
3770 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3771 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
3772 Name.startswith("avx512.mask.vpermt2var.") ||
3773 Name.startswith("avx512.maskz.vpermt2var."))) {
3774 bool ZeroMask = Name[11] == 'z';
3775 bool IndexForm = Name[17] == 'i';
3776 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
3777 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
3778 Name.startswith("avx512.maskz.vpdpbusd.") ||
3779 Name.startswith("avx512.mask.vpdpbusds.") ||
3780 Name.startswith("avx512.maskz.vpdpbusds."))) {
3781 bool ZeroMask = Name[11] == 'z';
3782 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3783 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3784 Intrinsic::ID IID;
3785 if (VecWidth == 128 && !IsSaturating)
3786 IID = Intrinsic::x86_avx512_vpdpbusd_128;
3787 else if (VecWidth == 256 && !IsSaturating)
3788 IID = Intrinsic::x86_avx512_vpdpbusd_256;
3789 else if (VecWidth == 512 && !IsSaturating)
3790 IID = Intrinsic::x86_avx512_vpdpbusd_512;
3791 else if (VecWidth == 128 && IsSaturating)
3792 IID = Intrinsic::x86_avx512_vpdpbusds_128;
3793 else if (VecWidth == 256 && IsSaturating)
3794 IID = Intrinsic::x86_avx512_vpdpbusds_256;
3795 else if (VecWidth == 512 && IsSaturating)
3796 IID = Intrinsic::x86_avx512_vpdpbusds_512;
3797 else
3798 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3798)
;
3799
3800 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3801 CI->getArgOperand(2) };
3802 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3803 Args);
3804 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3805 : CI->getArgOperand(0);
3806 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3807 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
3808 Name.startswith("avx512.maskz.vpdpwssd.") ||
3809 Name.startswith("avx512.mask.vpdpwssds.") ||
3810 Name.startswith("avx512.maskz.vpdpwssds."))) {
3811 bool ZeroMask = Name[11] == 'z';
3812 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3813 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3814 Intrinsic::ID IID;
3815 if (VecWidth == 128 && !IsSaturating)
3816 IID = Intrinsic::x86_avx512_vpdpwssd_128;
3817 else if (VecWidth == 256 && !IsSaturating)
3818 IID = Intrinsic::x86_avx512_vpdpwssd_256;
3819 else if (VecWidth == 512 && !IsSaturating)
3820 IID = Intrinsic::x86_avx512_vpdpwssd_512;
3821 else if (VecWidth == 128 && IsSaturating)
3822 IID = Intrinsic::x86_avx512_vpdpwssds_128;
3823 else if (VecWidth == 256 && IsSaturating)
3824 IID = Intrinsic::x86_avx512_vpdpwssds_256;
3825 else if (VecWidth == 512 && IsSaturating)
3826 IID = Intrinsic::x86_avx512_vpdpwssds_512;
3827 else
3828 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3828)
;
3829
3830 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3831 CI->getArgOperand(2) };
3832 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3833 Args);
3834 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3835 : CI->getArgOperand(0);
3836 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3837 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
3838 Name == "addcarry.u32" || Name == "addcarry.u64" ||
3839 Name == "subborrow.u32" || Name == "subborrow.u64")) {
3840 Intrinsic::ID IID;
3841 if (Name[0] == 'a' && Name.back() == '2')
3842 IID = Intrinsic::x86_addcarry_32;
3843 else if (Name[0] == 'a' && Name.back() == '4')
3844 IID = Intrinsic::x86_addcarry_64;
3845 else if (Name[0] == 's' && Name.back() == '2')
3846 IID = Intrinsic::x86_subborrow_32;
3847 else if (Name[0] == 's' && Name.back() == '4')
3848 IID = Intrinsic::x86_subborrow_64;
3849 else
3850 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3850)
;
3851
3852 // Make a call with 3 operands.
3853 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3854 CI->getArgOperand(2)};
3855 Value *NewCall = Builder.CreateCall(
3856 Intrinsic::getDeclaration(CI->getModule(), IID),
3857 Args);
3858
3859 // Extract the second result and store it.
3860 Value *Data = Builder.CreateExtractValue(NewCall, 1);
3861 // Cast the pointer to the right type.
3862 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
3863 llvm::PointerType::getUnqual(Data->getType()));
3864 Builder.CreateAlignedStore(Data, Ptr, Align(1));
3865 // Replace the original call result with the first result of the new call.
3866 Value *CF = Builder.CreateExtractValue(NewCall, 0);
3867
3868 CI->replaceAllUsesWith(CF);
3869 Rep = nullptr;
3870 } else if (IsX86 && Name.startswith("avx512.mask.") &&
3871 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
3872 // Rep will be updated by the call in the condition.
3873 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
3874 Value *Arg = CI->getArgOperand(0);
3875 Value *Neg = Builder.CreateNeg(Arg, "neg");
3876 Value *Cmp = Builder.CreateICmpSGE(
3877 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
3878 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
3879 } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
3880 Name.startswith("atomic.load.add.f64.p"))) {
3881 Value *Ptr = CI->getArgOperand(0);
3882 Value *Val = CI->getArgOperand(1);
3883 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
3884 AtomicOrdering::SequentiallyConsistent);
3885 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
3886 Name == "max.ui" || Name == "max.ull")) {
3887 Value *Arg0 = CI->getArgOperand(0);
3888 Value *Arg1 = CI->getArgOperand(1);
3889 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3890 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
3891 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
3892 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
3893 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
3894 Name == "min.ui" || Name == "min.ull")) {
3895 Value *Arg0 = CI->getArgOperand(0);
3896 Value *Arg1 = CI->getArgOperand(1);
3897 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3898 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
3899 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
3900 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
3901 } else if (IsNVVM && Name == "clz.ll") {
3902 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 and returns an i64.
3903 Value *Arg = CI->getArgOperand(0);
3904 Value *Ctlz = Builder.CreateCall(
3905 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
3906 {Arg->getType()}),
3907 {Arg, Builder.getFalse()}, "ctlz");
3908 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
3909 } else if (IsNVVM && Name == "popc.ll") {
3910 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 and returns an
3911 // i64.
3912 Value *Arg = CI->getArgOperand(0);
3913 Value *Popc = Builder.CreateCall(
3914 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
3915 {Arg->getType()}),
3916 Arg, "ctpop");
3917 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
3918 } else if (IsNVVM && Name == "h2f") {
3919 Rep = Builder.CreateCall(Intrinsic::getDeclaration(
3920 F->getParent(), Intrinsic::convert_from_fp16,
3921 {Builder.getFloatTy()}),
3922 CI->getArgOperand(0), "h2f");
3923 } else if (IsARM) {
3924 Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder);
3925 } else {
3926 llvm_unreachable("Unknown function for CallBase upgrade.")::llvm::llvm_unreachable_internal("Unknown function for CallBase upgrade."
, "llvm/lib/IR/AutoUpgrade.cpp", 3926)
;
3927 }
3928
3929 if (Rep)
3930 CI->replaceAllUsesWith(Rep);
3931 CI->eraseFromParent();
3932 return;
3933 }
3934
3935 const auto &DefaultCase = [&]() -> void {
3936 if (CI->getFunctionType() == NewFn->getFunctionType()) {
3937 // Handle generic mangling change.
3938 assert((static_cast <bool> ((CI->getCalledFunction()->getName
() != NewFn->getName()) && "Unknown function for CallBase upgrade and isn't just a name change"
) ? void (0) : __assert_fail ("(CI->getCalledFunction()->getName() != NewFn->getName()) && \"Unknown function for CallBase upgrade and isn't just a name change\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3940, __extension__ __PRETTY_FUNCTION__
))
3939 (CI->getCalledFunction()->getName() != NewFn->getName()) &&(static_cast <bool> ((CI->getCalledFunction()->getName
() != NewFn->getName()) && "Unknown function for CallBase upgrade and isn't just a name change"
) ? void (0) : __assert_fail ("(CI->getCalledFunction()->getName() != NewFn->getName()) && \"Unknown function for CallBase upgrade and isn't just a name change\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3940, __extension__ __PRETTY_FUNCTION__
))
3940 "Unknown function for CallBase upgrade and isn't just a name change")(static_cast <bool> ((CI->getCalledFunction()->getName
() != NewFn->getName()) && "Unknown function for CallBase upgrade and isn't just a name change"
) ? void (0) : __assert_fail ("(CI->getCalledFunction()->getName() != NewFn->getName()) && \"Unknown function for CallBase upgrade and isn't just a name change\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3940, __extension__ __PRETTY_FUNCTION__
))
;
3941 CI->setCalledFunction(NewFn);
3942 return;
3943 }
3944
3945 // This must be an upgrade from a named to a literal struct.
3946 if (auto *OldST = dyn_cast<StructType>(CI->getType())) {
3947 assert(OldST != NewFn->getReturnType() &&(static_cast <bool> (OldST != NewFn->getReturnType()
&& "Return type must have changed") ? void (0) : __assert_fail
("OldST != NewFn->getReturnType() && \"Return type must have changed\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3948, __extension__ __PRETTY_FUNCTION__
))
3948 "Return type must have changed")(static_cast <bool> (OldST != NewFn->getReturnType()
&& "Return type must have changed") ? void (0) : __assert_fail
("OldST != NewFn->getReturnType() && \"Return type must have changed\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3948, __extension__ __PRETTY_FUNCTION__
))
;
3949 assert(OldST->getNumElements() ==(static_cast <bool> (OldST->getNumElements() == cast
<StructType>(NewFn->getReturnType())->getNumElements
() && "Must have same number of elements") ? void (0)
: __assert_fail ("OldST->getNumElements() == cast<StructType>(NewFn->getReturnType())->getNumElements() && \"Must have same number of elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3951, __extension__ __PRETTY_FUNCTION__
))
3950 cast<StructType>(NewFn->getReturnType())->getNumElements() &&(static_cast <bool> (OldST->getNumElements() == cast
<StructType>(NewFn->getReturnType())->getNumElements
() && "Must have same number of elements") ? void (0)
: __assert_fail ("OldST->getNumElements() == cast<StructType>(NewFn->getReturnType())->getNumElements() && \"Must have same number of elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3951, __extension__ __PRETTY_FUNCTION__
))
3951 "Must have same number of elements")(static_cast <bool> (OldST->getNumElements() == cast
<StructType>(NewFn->getReturnType())->getNumElements
() && "Must have same number of elements") ? void (0)
: __assert_fail ("OldST->getNumElements() == cast<StructType>(NewFn->getReturnType())->getNumElements() && \"Must have same number of elements\""
, "llvm/lib/IR/AutoUpgrade.cpp", 3951, __extension__ __PRETTY_FUNCTION__
))
;
3952
3953 SmallVector<Value *> Args(CI->args());
3954 Value *NewCI = Builder.CreateCall(NewFn, Args);
3955 Value *Res = PoisonValue::get(OldST);
3956 for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
3957 Value *Elem = Builder.CreateExtractValue(NewCI, Idx);
3958 Res = Builder.CreateInsertValue(Res, Elem, Idx);
3959 }
3960 CI->replaceAllUsesWith(Res);
3961 CI->eraseFromParent();
3962 return;
3963 }
3964
3965 // We're probably about to produce something invalid. Let the verifier catch
3966 // it instead of dying here.
3967 CI->setCalledOperand(
3968 ConstantExpr::getPointerCast(NewFn, CI->getCalledOperand()->getType()));
3969 return;
3970 };
3971 CallInst *NewCall = nullptr;
3972 switch (NewFn->getIntrinsicID()) {
5
Control jumps to 'case vector_insert:' at line 4039
3973 default: {
3974 DefaultCase();
3975 return;
3976 }
3977 case Intrinsic::arm_neon_vst1:
3978 case Intrinsic::arm_neon_vst2:
3979 case Intrinsic::arm_neon_vst3:
3980 case Intrinsic::arm_neon_vst4:
3981 case Intrinsic::arm_neon_vst2lane:
3982 case Intrinsic::arm_neon_vst3lane:
3983 case Intrinsic::arm_neon_vst4lane: {
3984 SmallVector<Value *, 4> Args(CI->args());
3985 NewCall = Builder.CreateCall(NewFn, Args);
3986 break;
3987 }
3988 case Intrinsic::aarch64_sve_bfmlalb_lane_v2:
3989 case Intrinsic::aarch64_sve_bfmlalt_lane_v2:
3990 case Intrinsic::aarch64_sve_bfdot_lane_v2: {
3991 LLVMContext &Ctx = F->getParent()->getContext();
3992 SmallVector<Value *, 4> Args(CI->args());
3993 Args[3] = ConstantInt::get(Type::getInt32Ty(Ctx),
3994 cast<ConstantInt>(Args[3])->getZExtValue());
3995 NewCall = Builder.CreateCall(NewFn, Args);
3996 break;
3997 }
3998 case Intrinsic::aarch64_sve_ld3_sret:
3999 case Intrinsic::aarch64_sve_ld4_sret:
4000 case Intrinsic::aarch64_sve_ld2_sret: {
4001 StringRef Name = F->getName();
4002 Name = Name.substr(5);
4003 unsigned N = StringSwitch<unsigned>(Name)
4004 .StartsWith("aarch64.sve.ld2", 2)
4005 .StartsWith("aarch64.sve.ld3", 3)
4006 .StartsWith("aarch64.sve.ld4", 4)
4007 .Default(0);
4008 ScalableVectorType *RetTy =
4009 dyn_cast<ScalableVectorType>(F->getReturnType());
4010 unsigned MinElts = RetTy->getMinNumElements() / N;
4011 SmallVector<Value *, 2> Args(CI->args());
4012 Value *NewLdCall = Builder.CreateCall(NewFn, Args);
4013 Value *Ret = llvm::PoisonValue::get(RetTy);
4014 for (unsigned I = 0; I < N; I++) {
4015 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4016 Value *SRet = Builder.CreateExtractValue(NewLdCall, I);
4017 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
4018 }
4019 NewCall = dyn_cast<CallInst>(Ret);
4020 break;
4021 }
4022
4023 case Intrinsic::vector_extract: {
4024 StringRef Name = F->getName();
4025 Name = Name.substr(5); // Strip llvm
4026 if (!Name.startswith("aarch64.sve.tuple.get")) {
4027 DefaultCase();
4028 return;
4029 }
4030 ScalableVectorType *RetTy =
4031 dyn_cast<ScalableVectorType>(F->getReturnType());
4032 unsigned MinElts = RetTy->getMinNumElements();
4033 unsigned I = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
4034 Value *NewIdx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4035 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0), NewIdx});
4036 break;
4037 }
4038
4039 case Intrinsic::vector_insert: {
4040 StringRef Name = F->getName();
4041 Name = Name.substr(5);
4042 if (!Name.startswith("aarch64.sve.tuple")) {
6
Assuming the condition is false
7
Taking false branch
4043 DefaultCase();
4044 return;
4045 }
4046 if (Name.startswith("aarch64.sve.tuple.set")) {
8
Assuming the condition is false
9
Taking false branch
4047 unsigned I = dyn_cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
4048 ScalableVectorType *Ty =
4049 dyn_cast<ScalableVectorType>(CI->getArgOperand(2)->getType());
4050 Value *NewIdx =
4051 ConstantInt::get(Type::getInt64Ty(C), I * Ty->getMinNumElements());
4052 NewCall = Builder.CreateCall(
4053 NewFn, {CI->getArgOperand(0), CI->getArgOperand(2), NewIdx});
4054 break;
4055 }
4056 if (Name.startswith("aarch64.sve.tuple.create")) {
10
Assuming the condition is true
11
Taking true branch
4057 unsigned N = StringSwitch<unsigned>(Name)
4058 .StartsWith("aarch64.sve.tuple.create2", 2)
4059 .StartsWith("aarch64.sve.tuple.create3", 3)
4060 .StartsWith("aarch64.sve.tuple.create4", 4)
4061 .Default(0);
4062 assert(N > 1 && "Create is expected to be between 2-4")(static_cast <bool> (N > 1 && "Create is expected to be between 2-4"
) ? void (0) : __assert_fail ("N > 1 && \"Create is expected to be between 2-4\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4062, __extension__ __PRETTY_FUNCTION__
))
;
12
'?' condition is true
4063 ScalableVectorType *RetTy =
14
'RetTy' initialized to a null pointer value
4064 dyn_cast<ScalableVectorType>(F->getReturnType());
13
Assuming the object is not a 'CastReturnType'
4065 Value *Ret = llvm::PoisonValue::get(RetTy);
4066 unsigned MinElts = RetTy->getMinNumElements() / N;
15
Called C++ object pointer is null
4067 for (unsigned I = 0; I < N; I++) {
4068 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
4069 Value *V = CI->getArgOperand(I);
4070 Ret = Builder.CreateInsertVector(RetTy, Ret, V, Idx);
4071 }
4072 NewCall = dyn_cast<CallInst>(Ret);
4073 }
4074 break;
4075 }
4076
4077 case Intrinsic::arm_neon_bfdot:
4078 case Intrinsic::arm_neon_bfmmla:
4079 case Intrinsic::arm_neon_bfmlalb:
4080 case Intrinsic::arm_neon_bfmlalt:
4081 case Intrinsic::aarch64_neon_bfdot:
4082 case Intrinsic::aarch64_neon_bfmmla:
4083 case Intrinsic::aarch64_neon_bfmlalb:
4084 case Intrinsic::aarch64_neon_bfmlalt: {
4085 SmallVector<Value *, 3> Args;
4086 assert(CI->arg_size() == 3 &&(static_cast <bool> (CI->arg_size() == 3 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 3 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4087, __extension__ __PRETTY_FUNCTION__
))
4087 "Mismatch between function args and call args")(static_cast <bool> (CI->arg_size() == 3 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 3 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4087, __extension__ __PRETTY_FUNCTION__
))
;
4088 size_t OperandWidth =
4089 CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
4090 assert((OperandWidth == 64 || OperandWidth == 128) &&(static_cast <bool> ((OperandWidth == 64 || OperandWidth
== 128) && "Unexpected operand width") ? void (0) : __assert_fail
("(OperandWidth == 64 || OperandWidth == 128) && \"Unexpected operand width\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4091, __extension__ __PRETTY_FUNCTION__
))
4091 "Unexpected operand width")(static_cast <bool> ((OperandWidth == 64 || OperandWidth
== 128) && "Unexpected operand width") ? void (0) : __assert_fail
("(OperandWidth == 64 || OperandWidth == 128) && \"Unexpected operand width\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4091, __extension__ __PRETTY_FUNCTION__
))
;
4092 Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
4093 auto Iter = CI->args().begin();
4094 Args.push_back(*Iter++);
4095 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4096 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
4097 NewCall = Builder.CreateCall(NewFn, Args);
4098 break;
4099 }
4100
4101 case Intrinsic::bitreverse:
4102 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4103 break;
4104
4105 case Intrinsic::ctlz:
4106 case Intrinsic::cttz:
4107 assert(CI->arg_size() == 1 &&(static_cast <bool> (CI->arg_size() == 1 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 1 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4108, __extension__ __PRETTY_FUNCTION__
))
4108 "Mismatch between function args and call args")(static_cast <bool> (CI->arg_size() == 1 && "Mismatch between function args and call args"
) ? void (0) : __assert_fail ("CI->arg_size() == 1 && \"Mismatch between function args and call args\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4108, __extension__ __PRETTY_FUNCTION__
))
;
4109 NewCall =
4110 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
4111 break;
4112
4113 case Intrinsic::objectsize: {
4114 Value *NullIsUnknownSize =
4115 CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
4116 Value *Dynamic =
4117 CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
4118 NewCall = Builder.CreateCall(
4119 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
4120 break;
4121 }
4122
4123 case Intrinsic::ctpop:
4124 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4125 break;
4126
4127 case Intrinsic::convert_from_fp16:
4128 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4129 break;
4130
4131 case Intrinsic::dbg_value:
4132 // Upgrade from the old version that had an extra offset argument.
4133 assert(CI->arg_size() == 4)(static_cast <bool> (CI->arg_size() == 4) ? void (0)
: __assert_fail ("CI->arg_size() == 4", "llvm/lib/IR/AutoUpgrade.cpp"
, 4133, __extension__ __PRETTY_FUNCTION__))
;
4134 // Drop nonzero offsets instead of attempting to upgrade them.
4135 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
4136 if (Offset->isZeroValue()) {
4137 NewCall = Builder.CreateCall(
4138 NewFn,
4139 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
4140 break;
4141 }
4142 CI->eraseFromParent();
4143 return;
4144
4145 case Intrinsic::ptr_annotation:
4146 // Upgrade from versions that lacked the annotation attribute argument.
4147 if (CI->arg_size() != 4) {
4148 DefaultCase();
4149 return;
4150 }
4151
4152 // Create a new call with an added null annotation attribute argument.
4153 NewCall = Builder.CreateCall(
4154 NewFn,
4155 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
4156 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
4157 NewCall->takeName(CI);
4158 CI->replaceAllUsesWith(NewCall);
4159 CI->eraseFromParent();
4160 return;
4161
4162 case Intrinsic::var_annotation:
4163 // Upgrade from versions that lacked the annotation attribute argument.
4164 if (CI->arg_size() != 4) {
4165 DefaultCase();
4166 return;
4167 }
4168 // Create a new call with an added null annotation attribute argument.
4169 NewCall = Builder.CreateCall(
4170 NewFn,
4171 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
4172 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
4173 NewCall->takeName(CI);
4174 CI->replaceAllUsesWith(NewCall);
4175 CI->eraseFromParent();
4176 return;
4177
4178 case Intrinsic::x86_xop_vfrcz_ss:
4179 case Intrinsic::x86_xop_vfrcz_sd:
4180 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
4181 break;
4182
4183 case Intrinsic::x86_xop_vpermil2pd:
4184 case Intrinsic::x86_xop_vpermil2ps:
4185 case Intrinsic::x86_xop_vpermil2pd_256:
4186 case Intrinsic::x86_xop_vpermil2ps_256: {
4187 SmallVector<Value *, 4> Args(CI->args());
4188 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
4189 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
4190 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
4191 NewCall = Builder.CreateCall(NewFn, Args);
4192 break;
4193 }
4194
4195 case Intrinsic::x86_sse41_ptestc:
4196 case Intrinsic::x86_sse41_ptestz:
4197 case Intrinsic::x86_sse41_ptestnzc: {
4198 // The arguments for these intrinsics used to be v4f32, and changed
4199 // to v2i64. This is purely a nop, since those are bitwise intrinsics.
4200 // So, the only thing required is a bitcast for both arguments.
4201 // First, check the arguments have the old type.
4202 Value *Arg0 = CI->getArgOperand(0);
4203 if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
4204 return;
4205
4206 // Old intrinsic, add bitcasts
4207 Value *Arg1 = CI->getArgOperand(1);
4208
4209 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
4210
4211 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
4212 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
4213
4214 NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
4215 break;
4216 }
4217
4218 case Intrinsic::x86_rdtscp: {
4219 // This used to take 1 arguments. If we have no arguments, it is already
4220 // upgraded.
4221 if (CI->getNumOperands() == 0)
4222 return;
4223
4224 NewCall = Builder.CreateCall(NewFn);
4225 // Extract the second result and store it.
4226 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4227 // Cast the pointer to the right type.
4228 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
4229 llvm::PointerType::getUnqual(Data->getType()));
4230 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4231 // Replace the original call result with the first result of the new call.
4232 Value *TSC = Builder.CreateExtractValue(NewCall, 0);
4233
4234 NewCall->takeName(CI);
4235 CI->replaceAllUsesWith(TSC);
4236 CI->eraseFromParent();
4237 return;
4238 }
4239
4240 case Intrinsic::x86_sse41_insertps:
4241 case Intrinsic::x86_sse41_dppd:
4242 case Intrinsic::x86_sse41_dpps:
4243 case Intrinsic::x86_sse41_mpsadbw:
4244 case Intrinsic::x86_avx_dp_ps_256:
4245 case Intrinsic::x86_avx2_mpsadbw: {
4246 // Need to truncate the last argument from i32 to i8 -- this argument models
4247 // an inherently 8-bit immediate operand to these x86 instructions.
4248 SmallVector<Value *, 4> Args(CI->args());
4249
4250 // Replace the last argument with a trunc.
4251 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
4252 NewCall = Builder.CreateCall(NewFn, Args);
4253 break;
4254 }
4255
4256 case Intrinsic::x86_avx512_mask_cmp_pd_128:
4257 case Intrinsic::x86_avx512_mask_cmp_pd_256:
4258 case Intrinsic::x86_avx512_mask_cmp_pd_512:
4259 case Intrinsic::x86_avx512_mask_cmp_ps_128:
4260 case Intrinsic::x86_avx512_mask_cmp_ps_256:
4261 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
4262 SmallVector<Value *, 4> Args(CI->args());
4263 unsigned NumElts =
4264 cast<FixedVectorType>(Args[0]->getType())->getNumElements();
4265 Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
4266
4267 NewCall = Builder.CreateCall(NewFn, Args);
4268 Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
4269
4270 NewCall->takeName(CI);
4271 CI->replaceAllUsesWith(Res);
4272 CI->eraseFromParent();
4273 return;
4274 }
4275
4276 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128:
4277 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256:
4278 case Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512:
4279 case Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128:
4280 case Intrinsic::x86_avx512bf16_cvtneps2bf16_256:
4281 case Intrinsic::x86_avx512bf16_cvtneps2bf16_512: {
4282 SmallVector<Value *, 4> Args(CI->args());
4283 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4284 if (NewFn->getIntrinsicID() ==
4285 Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
4286 Args[1] = Builder.CreateBitCast(
4287 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4288
4289 NewCall = Builder.CreateCall(NewFn, Args);
4290 Value *Res = Builder.CreateBitCast(
4291 NewCall, FixedVectorType::get(Builder.getInt16Ty(), NumElts));
4292
4293 NewCall->takeName(CI);
4294 CI->replaceAllUsesWith(Res);
4295 CI->eraseFromParent();
4296 return;
4297 }
4298 case Intrinsic::x86_avx512bf16_dpbf16ps_128:
4299 case Intrinsic::x86_avx512bf16_dpbf16ps_256:
4300 case Intrinsic::x86_avx512bf16_dpbf16ps_512:{
4301 SmallVector<Value *, 4> Args(CI->args());
4302 unsigned NumElts =
4303 cast<FixedVectorType>(CI->getType())->getNumElements() * 2;
4304 Args[1] = Builder.CreateBitCast(
4305 Args[1], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4306 Args[2] = Builder.CreateBitCast(
4307 Args[2], FixedVectorType::get(Builder.getBFloatTy(), NumElts));
4308
4309 NewCall = Builder.CreateCall(NewFn, Args);
4310 break;
4311 }
4312
4313 case Intrinsic::thread_pointer: {
4314 NewCall = Builder.CreateCall(NewFn, {});
4315 break;
4316 }
4317
4318 case Intrinsic::invariant_start:
4319 case Intrinsic::invariant_end: {
4320 SmallVector<Value *, 4> Args(CI->args());
4321 NewCall = Builder.CreateCall(NewFn, Args);
4322 break;
4323 }
4324 case Intrinsic::masked_load:
4325 case Intrinsic::masked_store:
4326 case Intrinsic::masked_gather:
4327 case Intrinsic::masked_scatter: {
4328 SmallVector<Value *, 4> Args(CI->args());
4329 NewCall = Builder.CreateCall(NewFn, Args);
4330 NewCall->copyMetadata(*CI);
4331 break;
4332 }
4333
4334 case Intrinsic::memcpy:
4335 case Intrinsic::memmove:
4336 case Intrinsic::memset: {
4337 // We have to make sure that the call signature is what we're expecting.
4338 // We only want to change the old signatures by removing the alignment arg:
4339 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1)
4340 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1)
4341 // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
4342 // -> @llvm.memset...(i8*, i8, i[32|64], i1)
4343 // Note: i8*'s in the above can be any pointer type
4344 if (CI->arg_size() != 5) {
4345 DefaultCase();
4346 return;
4347 }
4348 // Remove alignment argument (3), and add alignment attributes to the
4349 // dest/src pointers.
4350 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
4351 CI->getArgOperand(2), CI->getArgOperand(4)};
4352 NewCall = Builder.CreateCall(NewFn, Args);
4353 AttributeList OldAttrs = CI->getAttributes();
4354 AttributeList NewAttrs = AttributeList::get(
4355 C, OldAttrs.getFnAttrs(), OldAttrs.getRetAttrs(),
4356 {OldAttrs.getParamAttrs(0), OldAttrs.getParamAttrs(1),
4357 OldAttrs.getParamAttrs(2), OldAttrs.getParamAttrs(4)});
4358 NewCall->setAttributes(NewAttrs);
4359 auto *MemCI = cast<MemIntrinsic>(NewCall);
4360 // All mem intrinsics support dest alignment.
4361 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
4362 MemCI->setDestAlignment(Align->getMaybeAlignValue());
4363 // Memcpy/Memmove also support source alignment.
4364 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
4365 MTI->setSourceAlignment(Align->getMaybeAlignValue());
4366 break;
4367 }
4368 }
4369 assert(NewCall && "Should have either set this variable or returned through "(static_cast <bool> (NewCall && "Should have either set this variable or returned through "
"the default case") ? void (0) : __assert_fail ("NewCall && \"Should have either set this variable or returned through \" \"the default case\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4370, __extension__ __PRETTY_FUNCTION__
))
4370 "the default case")(static_cast <bool> (NewCall && "Should have either set this variable or returned through "
"the default case") ? void (0) : __assert_fail ("NewCall && \"Should have either set this variable or returned through \" \"the default case\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4370, __extension__ __PRETTY_FUNCTION__
))
;
4371 NewCall->takeName(CI);
4372 CI->replaceAllUsesWith(NewCall);
4373 CI->eraseFromParent();
4374}
4375
4376void llvm::UpgradeCallsToIntrinsic(Function *F) {
4377 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.")(static_cast <bool> (F && "Illegal attempt to upgrade a non-existent intrinsic."
) ? void (0) : __assert_fail ("F && \"Illegal attempt to upgrade a non-existent intrinsic.\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4377, __extension__ __PRETTY_FUNCTION__
))
;
4378
4379 // Check if this function should be upgraded and get the replacement function
4380 // if there is one.
4381 Function *NewFn;
4382 if (UpgradeIntrinsicFunction(F, NewFn)) {
4383 // Replace all users of the old function with the new function or new
4384 // instructions. This is not a range loop because the call is deleted.
4385 for (User *U : make_early_inc_range(F->users()))
4386 if (CallBase *CB = dyn_cast<CallBase>(U))
4387 UpgradeIntrinsicCall(CB, NewFn);
4388
4389 // Remove old function, no longer used, from the module.
4390 F->eraseFromParent();
4391 }
4392}
4393
4394MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
4395 // Check if the tag uses struct-path aware TBAA format.
4396 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
4397 return &MD;
4398
4399 auto &Context = MD.getContext();
4400 if (MD.getNumOperands() == 3) {
4401 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
4402 MDNode *ScalarType = MDNode::get(Context, Elts);
4403 // Create a MDNode <ScalarType, ScalarType, offset 0, const>
4404 Metadata *Elts2[] = {ScalarType, ScalarType,
4405 ConstantAsMetadata::get(
4406 Constant::getNullValue(Type::getInt64Ty(Context))),
4407 MD.getOperand(2)};
4408 return MDNode::get(Context, Elts2);
4409 }
4410 // Create a MDNode <MD, MD, offset 0>
4411 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
4412 Type::getInt64Ty(Context)))};
4413 return MDNode::get(Context, Elts);
4414}
4415
4416Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
4417 Instruction *&Temp) {
4418 if (Opc != Instruction::BitCast)
4419 return nullptr;
4420
4421 Temp = nullptr;
4422 Type *SrcTy = V->getType();
4423 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4424 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4425 LLVMContext &Context = V->getContext();
4426
4427 // We have no information about target data layout, so we assume that
4428 // the maximum pointer size is 64bit.
4429 Type *MidTy = Type::getInt64Ty(Context);
4430 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
4431
4432 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
4433 }
4434
4435 return nullptr;
4436}
4437
4438Constant *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
4439 if (Opc != Instruction::BitCast)
4440 return nullptr;
4441
4442 Type *SrcTy = C->getType();
4443 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4444 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4445 LLVMContext &Context = C->getContext();
4446
4447 // We have no information about target data layout, so we assume that
4448 // the maximum pointer size is 64bit.
4449 Type *MidTy = Type::getInt64Ty(Context);
4450
4451 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
4452 DestTy);
4453 }
4454
4455 return nullptr;
4456}
4457
4458/// Check the debug info version number, if it is out-dated, drop the debug
4459/// info. Return true if module is modified.
4460bool llvm::UpgradeDebugInfo(Module &M) {
4461 unsigned Version = getDebugMetadataVersionFromModule(M);
4462 if (Version == DEBUG_METADATA_VERSION) {
4463 bool BrokenDebugInfo = false;
4464 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
4465 report_fatal_error("Broken module found, compilation aborted!");
4466 if (!BrokenDebugInfo)
4467 // Everything is ok.
4468 return false;
4469 else {
4470 // Diagnose malformed debug info.
4471 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
4472 M.getContext().diagnose(Diag);
4473 }
4474 }
4475 bool Modified = StripDebugInfo(M);
4476 if (Modified && Version != DEBUG_METADATA_VERSION) {
4477 // Diagnose a version mismatch.
4478 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
4479 M.getContext().diagnose(DiagVersion);
4480 }
4481 return Modified;
4482}
4483
4484/// This checks for objc retain release marker which should be upgraded. It
4485/// returns true if module is modified.
4486static bool UpgradeRetainReleaseMarker(Module &M) {
4487 bool Changed = false;
4488 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
4489 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
4490 if (ModRetainReleaseMarker) {
4491 MDNode *Op = ModRetainReleaseMarker->getOperand(0);
4492 if (Op) {
4493 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
4494 if (ID) {
4495 SmallVector<StringRef, 4> ValueComp;
4496 ID->getString().split(ValueComp, "#");
4497 if (ValueComp.size() == 2) {
4498 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
4499 ID = MDString::get(M.getContext(), NewValue);
4500 }
4501 M.addModuleFlag(Module::Error, MarkerKey, ID);
4502 M.eraseNamedMetadata(ModRetainReleaseMarker);
4503 Changed = true;
4504 }
4505 }
4506 }
4507 return Changed;
4508}
4509
4510void llvm::UpgradeARCRuntime(Module &M) {
4511 // This lambda converts normal function calls to ARC runtime functions to
4512 // intrinsic calls.
4513 auto UpgradeToIntrinsic = [&](const char *OldFunc,
4514 llvm::Intrinsic::ID IntrinsicFunc) {
4515 Function *Fn = M.getFunction(OldFunc);
4516
4517 if (!Fn)
4518 return;
4519
4520 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
4521
4522 for (User *U : make_early_inc_range(Fn->users())) {
4523 CallInst *CI = dyn_cast<CallInst>(U);
4524 if (!CI || CI->getCalledFunction() != Fn)
4525 continue;
4526
4527 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
4528 FunctionType *NewFuncTy = NewFn->getFunctionType();
4529 SmallVector<Value *, 2> Args;
4530
4531 // Don't upgrade the intrinsic if it's not valid to bitcast the return
4532 // value to the return type of the old function.
4533 if (NewFuncTy->getReturnType() != CI->getType() &&
4534 !CastInst::castIsValid(Instruction::BitCast, CI,
4535 NewFuncTy->getReturnType()))
4536 continue;
4537
4538 bool InvalidCast = false;
4539
4540 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
4541 Value *Arg = CI->getArgOperand(I);
4542
4543 // Bitcast argument to the parameter type of the new function if it's
4544 // not a variadic argument.
4545 if (I < NewFuncTy->getNumParams()) {
4546 // Don't upgrade the intrinsic if it's not valid to bitcast the argument
4547 // to the parameter type of the new function.
4548 if (!CastInst::castIsValid(Instruction::BitCast, Arg,
4549 NewFuncTy->getParamType(I))) {
4550 InvalidCast = true;
4551 break;
4552 }
4553 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
4554 }
4555 Args.push_back(Arg);
4556 }
4557
4558 if (InvalidCast)
4559 continue;
4560
4561 // Create a call instruction that calls the new function.
4562 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
4563 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
4564 NewCall->takeName(CI);
4565
4566 // Bitcast the return value back to the type of the old call.
4567 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
4568
4569 if (!CI->use_empty())
4570 CI->replaceAllUsesWith(NewRetVal);
4571 CI->eraseFromParent();
4572 }
4573
4574 if (Fn->use_empty())
4575 Fn->eraseFromParent();
4576 };
4577
4578 // Unconditionally convert a call to "clang.arc.use" to a call to
4579 // "llvm.objc.clang.arc.use".
4580 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
4581
4582 // Upgrade the retain release marker. If there is no need to upgrade
4583 // the marker, that means either the module is already new enough to contain
4584 // new intrinsics or it is not ARC. There is no need to upgrade runtime call.
4585 if (!UpgradeRetainReleaseMarker(M))
4586 return;
4587
4588 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
4589 {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
4590 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
4591 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
4592 {"objc_autoreleaseReturnValue",
4593 llvm::Intrinsic::objc_autoreleaseReturnValue},
4594 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
4595 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
4596 {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
4597 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
4598 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
4599 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
4600 {"objc_release", llvm::Intrinsic::objc_release},
4601 {"objc_retain", llvm::Intrinsic::objc_retain},
4602 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
4603 {"objc_retainAutoreleaseReturnValue",
4604 llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
4605 {"objc_retainAutoreleasedReturnValue",
4606 llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
4607 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
4608 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
4609 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
4610 {"objc_unsafeClaimAutoreleasedReturnValue",
4611 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
4612 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
4613 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
4614 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
4615 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
4616 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
4617 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
4618 {"objc_arc_annotation_topdown_bbstart",
4619 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
4620 {"objc_arc_annotation_topdown_bbend",
4621 llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
4622 {"objc_arc_annotation_bottomup_bbstart",
4623 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
4624 {"objc_arc_annotation_bottomup_bbend",
4625 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
4626
4627 for (auto &I : RuntimeFuncs)
4628 UpgradeToIntrinsic(I.first, I.second);
4629}
4630
4631bool llvm::UpgradeModuleFlags(Module &M) {
4632 NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
4633 if (!ModFlags)
4634 return false;
4635
4636 bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
4637 bool HasSwiftVersionFlag = false;
4638 uint8_t SwiftMajorVersion, SwiftMinorVersion;
4639 uint32_t SwiftABIVersion;
4640 auto Int8Ty = Type::getInt8Ty(M.getContext());
4641 auto Int32Ty = Type::getInt32Ty(M.getContext());
4642
4643 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
4644 MDNode *Op = ModFlags->getOperand(I);
4645 if (Op->getNumOperands() != 3)
4646 continue;
4647 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
4648 if (!ID)
4649 continue;
4650 auto SetBehavior = [&](Module::ModFlagBehavior B) {
4651 Metadata *Ops[3] = {ConstantAsMetadata::get(ConstantInt::get(
4652 Type::getInt32Ty(M.getContext()), B)),
4653 MDString::get(M.getContext(), ID->getString()),
4654 Op->getOperand(2)};
4655 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4656 Changed = true;
4657 };
4658
4659 if (ID->getString() == "Objective-C Image Info Version")
4660 HasObjCFlag = true;
4661 if (ID->getString() == "Objective-C Class Properties")
4662 HasClassProperties = true;
4663 // Upgrade PIC from Error/Max to Min.
4664 if (ID->getString() == "PIC Level") {
4665 if (auto *Behavior =
4666 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4667 uint64_t V = Behavior->getLimitedValue();
4668 if (V == Module::Error || V == Module::Max)
4669 SetBehavior(Module::Min);
4670 }
4671 }
4672 // Upgrade "PIE Level" from Error to Max.
4673 if (ID->getString() == "PIE Level")
4674 if (auto *Behavior =
4675 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)))
4676 if (Behavior->getLimitedValue() == Module::Error)
4677 SetBehavior(Module::Max);
4678
4679 // Upgrade branch protection and return address signing module flags. The
4680 // module flag behavior for these fields were Error and now they are Min.
4681 if (ID->getString() == "branch-target-enforcement" ||
4682 ID->getString().startswith("sign-return-address")) {
4683 if (auto *Behavior =
4684 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4685 if (Behavior->getLimitedValue() == Module::Error) {
4686 Type *Int32Ty = Type::getInt32Ty(M.getContext());
4687 Metadata *Ops[3] = {
4688 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Min)),
4689 Op->getOperand(1), Op->getOperand(2)};
4690 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4691 Changed = true;
4692 }
4693 }
4694 }
4695
4696 // Upgrade Objective-C Image Info Section. Removed the whitespce in the
4697 // section name so that llvm-lto will not complain about mismatching
4698 // module flags that is functionally the same.
4699 if (ID->getString() == "Objective-C Image Info Section") {
4700 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
4701 SmallVector<StringRef, 4> ValueComp;
4702 Value->getString().split(ValueComp, " ");
4703 if (ValueComp.size() != 1) {
4704 std::string NewValue;
4705 for (auto &S : ValueComp)
4706 NewValue += S.str();
4707 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
4708 MDString::get(M.getContext(), NewValue)};
4709 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4710 Changed = true;
4711 }
4712 }
4713 }
4714
4715 // IRUpgrader turns a i32 type "Objective-C Garbage Collection" into i8 value.
4716 // If the higher bits are set, it adds new module flag for swift info.
4717 if (ID->getString() == "Objective-C Garbage Collection") {
4718 auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
4719 if (Md) {
4720 assert(Md->getValue() && "Expected non-empty metadata")(static_cast <bool> (Md->getValue() && "Expected non-empty metadata"
) ? void (0) : __assert_fail ("Md->getValue() && \"Expected non-empty metadata\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4720, __extension__ __PRETTY_FUNCTION__
))
;
4721 auto Type = Md->getValue()->getType();
4722 if (Type == Int8Ty)
4723 continue;
4724 unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
4725 if ((Val & 0xff) != Val) {
4726 HasSwiftVersionFlag = true;
4727 SwiftABIVersion = (Val & 0xff00) >> 8;
4728 SwiftMajorVersion = (Val & 0xff000000) >> 24;
4729 SwiftMinorVersion = (Val & 0xff0000) >> 16;
4730 }
4731 Metadata *Ops[3] = {
4732 ConstantAsMetadata::get(ConstantInt::get(Int32Ty,Module::Error)),
4733 Op->getOperand(1),
4734 ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
4735 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4736 Changed = true;
4737 }
4738 }
4739 }
4740
4741 // "Objective-C Class Properties" is recently added for Objective-C. We
4742 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
4743 // flag of value 0, so we can correclty downgrade this flag when trying to
4744 // link an ObjC bitcode without this module flag with an ObjC bitcode with
4745 // this module flag.
4746 if (HasObjCFlag && !HasClassProperties) {
4747 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
4748 (uint32_t)0);
4749 Changed = true;
4750 }
4751
4752 if (HasSwiftVersionFlag) {
4753 M.addModuleFlag(Module::Error, "Swift ABI Version",
4754 SwiftABIVersion);
4755 M.addModuleFlag(Module::Error, "Swift Major Version",
4756 ConstantInt::get(Int8Ty, SwiftMajorVersion));
4757 M.addModuleFlag(Module::Error, "Swift Minor Version",
4758 ConstantInt::get(Int8Ty, SwiftMinorVersion));
4759 Changed = true;
4760 }
4761
4762 return Changed;
4763}
4764
4765void llvm::UpgradeSectionAttributes(Module &M) {
4766 auto TrimSpaces = [](StringRef Section) -> std::string {
4767 SmallVector<StringRef, 5> Components;
4768 Section.split(Components, ',');
4769
4770 SmallString<32> Buffer;
4771 raw_svector_ostream OS(Buffer);
4772
4773 for (auto Component : Components)
4774 OS << ',' << Component.trim();
4775
4776 return std::string(OS.str().substr(1));
4777 };
4778
4779 for (auto &GV : M.globals()) {
4780 if (!GV.hasSection())
4781 continue;
4782
4783 StringRef Section = GV.getSection();
4784
4785 if (!Section.startswith("__DATA, __objc_catlist"))
4786 continue;
4787
4788 // __DATA, __objc_catlist, regular, no_dead_strip
4789 // __DATA,__objc_catlist,regular,no_dead_strip
4790 GV.setSection(TrimSpaces(Section));
4791 }
4792}
4793
4794namespace {
4795// Prior to LLVM 10.0, the strictfp attribute could be used on individual
4796// callsites within a function that did not also have the strictfp attribute.
4797// Since 10.0, if strict FP semantics are needed within a function, the
4798// function must have the strictfp attribute and all calls within the function
4799// must also have the strictfp attribute. This latter restriction is
4800// necessary to prevent unwanted libcall simplification when a function is
4801// being cloned (such as for inlining).
4802//
4803// The "dangling" strictfp attribute usage was only used to prevent constant
4804// folding and other libcall simplification. The nobuiltin attribute on the
4805// callsite has the same effect.
4806struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
4807 StrictFPUpgradeVisitor() = default;
4808
4809 void visitCallBase(CallBase &Call) {
4810 if (!Call.isStrictFP())
4811 return;
4812 if (isa<ConstrainedFPIntrinsic>(&Call))
4813 return;
4814 // If we get here, the caller doesn't have the strictfp attribute
4815 // but this callsite does. Replace the strictfp attribute with nobuiltin.
4816 Call.removeFnAttr(Attribute::StrictFP);
4817 Call.addFnAttr(Attribute::NoBuiltin);
4818 }
4819};
4820} // namespace
4821
4822void llvm::UpgradeFunctionAttributes(Function &F) {
4823 // If a function definition doesn't have the strictfp attribute,
4824 // convert any callsite strictfp attributes to nobuiltin.
4825 if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
4826 StrictFPUpgradeVisitor SFPV;
4827 SFPV.visit(F);
4828 }
4829
4830 // Remove all incompatibile attributes from function.
4831 F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
4832 for (auto &Arg : F.args())
4833 Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
4834}
4835
4836static bool isOldLoopArgument(Metadata *MD) {
4837 auto *T = dyn_cast_or_null<MDTuple>(MD);
4838 if (!T)
4839 return false;
4840 if (T->getNumOperands() < 1)
4841 return false;
4842 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
4843 if (!S)
4844 return false;
4845 return S->getString().startswith("llvm.vectorizer.");
4846}
4847
4848static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
4849 StringRef OldPrefix = "llvm.vectorizer.";
4850 assert(OldTag.startswith(OldPrefix) && "Expected old prefix")(static_cast <bool> (OldTag.startswith(OldPrefix) &&
"Expected old prefix") ? void (0) : __assert_fail ("OldTag.startswith(OldPrefix) && \"Expected old prefix\""
, "llvm/lib/IR/AutoUpgrade.cpp", 4850, __extension__ __PRETTY_FUNCTION__
))
;
4851
4852 if (OldTag == "llvm.vectorizer.unroll")
4853 return MDString::get(C, "llvm.loop.interleave.count");
4854
4855 return MDString::get(
4856 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
4857 .str());
4858}
4859
4860static Metadata *upgradeLoopArgument(Metadata *MD) {
4861 auto *T = dyn_cast_or_null<MDTuple>(MD);
4862 if (!T)
4863 return MD;
4864 if (T->getNumOperands() < 1)
4865 return MD;
4866 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
4867 if (!OldTag)
4868 return MD;
4869 if (!OldTag->getString().startswith("llvm.vectorizer."))
4870 return MD;
4871
4872 // This has an old tag. Upgrade it.
4873 SmallVector<Metadata *, 8> Ops;
4874 Ops.reserve(T->getNumOperands());
4875 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
4876 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
4877 Ops.push_back(T->getOperand(I));
4878
4879 return MDTuple::get(T->getContext(), Ops);
4880}
4881
4882MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
4883 auto *T = dyn_cast<MDTuple>(&N);
4884 if (!T)
4885 return &N;
4886
4887 if (none_of(T->operands(), isOldLoopArgument))
4888 return &N;
4889
4890 SmallVector<Metadata *, 8> Ops;
4891 Ops.reserve(T->getNumOperands());
4892 for (Metadata *MD : T->operands())
4893 Ops.push_back(upgradeLoopArgument(MD));
4894
4895 return MDTuple::get(T->getContext(), Ops);
4896}
4897
4898std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
4899 Triple T(TT);
4900 // For AMDGPU we uprgrade older DataLayouts to include the default globals
4901 // address space of 1.
4902 if (T.isAMDGPU() && !DL.contains("-G") && !DL.startswith("G")) {
4903 return DL.empty() ? std::string("G1") : (DL + "-G1").str();
4904 }
4905
4906 if (T.isRISCV64()) {
4907 // Make i32 a native type for 64-bit RISC-V.
4908 auto I = DL.find("-n64-");
4909 if (I != StringRef::npos)
4910 return (DL.take_front(I) + "-n32:64-" + DL.drop_front(I + 5)).str();
4911 return DL.str();
4912 }
4913
4914 std::string Res = DL.str();
4915 if (!T.isX86())
4916 return Res;
4917
4918 // If the datalayout matches the expected format, add pointer size address
4919 // spaces to the datalayout.
4920 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
4921 if (!DL.contains(AddrSpaces)) {
4922 SmallVector<StringRef, 4> Groups;
4923 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
4924 if (R.match(DL, &Groups))
4925 Res = (Groups[1] + AddrSpaces + Groups[3]).str();
4926 }
4927
4928 // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
4929 // Raising the alignment is safe because Clang did not produce f80 values in
4930 // the MSVC environment before this upgrade was added.
4931 if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
4932 StringRef Ref = Res;
4933 auto I = Ref.find("-f80:32-");
4934 if (I != StringRef::npos)
4935 Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
4936 }
4937
4938 return Res;
4939}
4940
4941void llvm::UpgradeAttributes(AttrBuilder &B) {
4942 StringRef FramePointer;
4943 Attribute A = B.getAttribute("no-frame-pointer-elim");
4944 if (A.isValid()) {
4945 // The value can be "true" or "false".
4946 FramePointer = A.getValueAsString() == "true" ? "all" : "none";
4947 B.removeAttribute("no-frame-pointer-elim");
4948 }
4949 if (B.contains("no-frame-pointer-elim-non-leaf")) {
4950 // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
4951 if (FramePointer != "all")
4952 FramePointer = "non-leaf";
4953 B.removeAttribute("no-frame-pointer-elim-non-leaf");
4954 }
4955 if (!FramePointer.empty())
4956 B.addAttribute("frame-pointer", FramePointer);
4957
4958 A = B.getAttribute("null-pointer-is-valid");
4959 if (A.isValid()) {
4960 // The value can be "true" or "false".
4961 bool NullPointerIsValid = A.getValueAsString() == "true";
4962 B.removeAttribute("null-pointer-is-valid");
4963 if (NullPointerIsValid)
4964 B.addAttribute(Attribute::NullPointerIsValid);
4965 }
4966}
4967
4968void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
4969
4970 // clang.arc.attachedcall bundles are now required to have an operand.
4971 // If they don't, it's okay to drop them entirely: when there is an operand,
4972 // the "attachedcall" is meaningful and required, but without an operand,
4973 // it's just a marker NOP. Dropping it merely prevents an optimization.
4974 erase_if(Bundles, [&](OperandBundleDef &OBD) {
4975 return OBD.getTag() == "clang.arc.attachedcall" &&
4976 OBD.inputs().empty();
4977 });
4978}