Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/IR/AutoUpgrade.cpp
Warning: line 1584, column 32
Division by zero
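
The expression flagged at line 1584, column 32 falls outside the excerpt below. For context, here is a minimal hypothetical sketch (not the actual AutoUpgrade.cpp code; all names are invented) of the pattern the core.DivideZero checker reports: a divisor that is provably zero along at least one feasible path.

    // Hypothetical reduction of a division-by-zero finding. The analyzer
    // follows the branch where getLanes() returns 0 and reports the
    // division on that path.
    unsigned getLanes(bool Is512Bit) {
      return Is512Bit ? 4 : 0; // some path yields 0
    }
    unsigned splitIndex(unsigned Idx, bool Is512Bit) {
      unsigned Lanes = getLanes(Is512Bit);
      return Idx / Lanes; // Division by zero when Lanes == 0
    }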

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AutoUpgrade.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/IR -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/IR -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/IR/AutoUpgrade.cpp
1//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the auto-upgrade helper functions.
10// This is where deprecated IR intrinsics and other IR features are updated to
11// current specifications.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/IR/AutoUpgrade.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/ADT/Triple.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/DebugInfo.h"
20#include "llvm/IR/DiagnosticInfo.h"
21#include "llvm/IR/Function.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/InstVisitor.h"
24#include "llvm/IR/Instruction.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/IR/IntrinsicsAArch64.h"
28#include "llvm/IR/IntrinsicsARM.h"
29#include "llvm/IR/IntrinsicsX86.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/Module.h"
32#include "llvm/IR/Verifier.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/Regex.h"
35#include <cstring>
36using namespace llvm;
37
38static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
39
40// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
41// changed their type from v4f32 to v2i64.
42 static bool UpgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
43 Function *&NewFn) {
44 // Check whether this is an old version of the function, which received
45 // v4f32 arguments.
46 Type *Arg0Type = F->getFunctionType()->getParamType(0);
47 if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
48 return false;
49
50 // Yes, it's old, replace it with new version.
51 rename(F);
52 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
53 return true;
54}
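
A minimal sketch of the type distinction this helper keys on (assumes LLVM headers and libraries are available; illustrative only). Old ptest declarations took <4 x float> operands; the upgraded intrinsics take <2 x i64>:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      // old: declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
      Type *OldArgTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
      // new: declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)
      Type *NewArgTy = FixedVectorType::get(Type::getInt64Ty(Ctx), 2);
      return OldArgTy == NewArgTy ? 1 : 0; // distinct types: returns 0
    }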
55
56// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
57// arguments have changed their type from i32 to i8.
58static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
59 Function *&NewFn) {
60 // Check that the last argument is an i32.
61 Type *LastArgType = F->getFunctionType()->getParamType(
62 F->getFunctionType()->getNumParams() - 1);
63 if (!LastArgType->isIntegerTy(32))
64 return false;
65
66 // Move this function aside and map down.
67 rename(F);
68 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
69 return true;
70}
71
72// Upgrade the declaration of fp compare intrinsics that change return type
73// from scalar to vXi1 mask.
74static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
75 Function *&NewFn) {
76 // Check if the return type is a vector.
77 if (F->getReturnType()->isVectorTy())
78 return false;
79
80 rename(F);
81 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
82 return true;
83}
84
85static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
86 // All of the intrinsic matches below should be marked with which LLVM
87 // version started autoupgrading them. At some point in the future we would
88 // like to use this information to remove upgrade code for some older
89 // intrinsics. It is currently undecided how we will determine that future
90 // point.
91 if (Name == "addcarryx.u32" || // Added in 8.0
92 Name == "addcarryx.u64" || // Added in 8.0
93 Name == "addcarry.u32" || // Added in 8.0
94 Name == "addcarry.u64" || // Added in 8.0
95 Name == "subborrow.u32" || // Added in 8.0
96 Name == "subborrow.u64" || // Added in 8.0
97 Name.startswith("sse2.padds.") || // Added in 8.0
98 Name.startswith("sse2.psubs.") || // Added in 8.0
99 Name.startswith("sse2.paddus.") || // Added in 8.0
100 Name.startswith("sse2.psubus.") || // Added in 8.0
101 Name.startswith("avx2.padds.") || // Added in 8.0
102 Name.startswith("avx2.psubs.") || // Added in 8.0
103 Name.startswith("avx2.paddus.") || // Added in 8.0
104 Name.startswith("avx2.psubus.") || // Added in 8.0
105 Name.startswith("avx512.padds.") || // Added in 8.0
106 Name.startswith("avx512.psubs.") || // Added in 8.0
107 Name.startswith("avx512.mask.padds.") || // Added in 8.0
108 Name.startswith("avx512.mask.psubs.") || // Added in 8.0
109 Name.startswith("avx512.mask.paddus.") || // Added in 8.0
110 Name.startswith("avx512.mask.psubus.") || // Added in 8.0
111 Name == "ssse3.pabs.b.128" || // Added in 6.0
112 Name == "ssse3.pabs.w.128" || // Added in 6.0
113 Name == "ssse3.pabs.d.128" || // Added in 6.0
114 Name.startswith("fma4.vfmadd.s") || // Added in 7.0
115 Name.startswith("fma.vfmadd.") || // Added in 7.0
116 Name.startswith("fma.vfmsub.") || // Added in 7.0
117 Name.startswith("fma.vfmsubadd.") || // Added in 7.0
118 Name.startswith("fma.vfnmadd.") || // Added in 7.0
119 Name.startswith("fma.vfnmsub.") || // Added in 7.0
120 Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
121 Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
122 Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
123 Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
124 Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
125 Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
126 Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
127 Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
128 Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
129 Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
130 Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
131 Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
132 Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
133 Name.startswith("avx512.kunpck") || // Added in 6.0
134 Name.startswith("avx2.pabs.") || // Added in 6.0
135 Name.startswith("avx512.mask.pabs.") || // Added in 6.0
136 Name.startswith("avx512.broadcastm") || // Added in 6.0
137 Name == "sse.sqrt.ss" || // Added in 7.0
138 Name == "sse2.sqrt.sd" || // Added in 7.0
139 Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
140 Name.startswith("avx.sqrt.p") || // Added in 7.0
141 Name.startswith("sse2.sqrt.p") || // Added in 7.0
142 Name.startswith("sse.sqrt.p") || // Added in 7.0
143 Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
144 Name.startswith("sse2.pcmpeq.") || // Added in 3.1
145 Name.startswith("sse2.pcmpgt.") || // Added in 3.1
146 Name.startswith("avx2.pcmpeq.") || // Added in 3.1
147 Name.startswith("avx2.pcmpgt.") || // Added in 3.1
148 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
149 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
150 Name.startswith("avx.vperm2f128.") || // Added in 6.0
151 Name == "avx2.vperm2i128" || // Added in 6.0
152 Name == "sse.add.ss" || // Added in 4.0
153 Name == "sse2.add.sd" || // Added in 4.0
154 Name == "sse.sub.ss" || // Added in 4.0
155 Name == "sse2.sub.sd" || // Added in 4.0
156 Name == "sse.mul.ss" || // Added in 4.0
157 Name == "sse2.mul.sd" || // Added in 4.0
158 Name == "sse.div.ss" || // Added in 4.0
159 Name == "sse2.div.sd" || // Added in 4.0
160 Name == "sse41.pmaxsb" || // Added in 3.9
161 Name == "sse2.pmaxs.w" || // Added in 3.9
162 Name == "sse41.pmaxsd" || // Added in 3.9
163 Name == "sse2.pmaxu.b" || // Added in 3.9
164 Name == "sse41.pmaxuw" || // Added in 3.9
165 Name == "sse41.pmaxud" || // Added in 3.9
166 Name == "sse41.pminsb" || // Added in 3.9
167 Name == "sse2.pmins.w" || // Added in 3.9
168 Name == "sse41.pminsd" || // Added in 3.9
169 Name == "sse2.pminu.b" || // Added in 3.9
170 Name == "sse41.pminuw" || // Added in 3.9
171 Name == "sse41.pminud" || // Added in 3.9
172 Name == "avx512.kand.w" || // Added in 7.0
173 Name == "avx512.kandn.w" || // Added in 7.0
174 Name == "avx512.knot.w" || // Added in 7.0
175 Name == "avx512.kor.w" || // Added in 7.0
176 Name == "avx512.kxor.w" || // Added in 7.0
177 Name == "avx512.kxnor.w" || // Added in 7.0
178 Name == "avx512.kortestc.w" || // Added in 7.0
179 Name == "avx512.kortestz.w" || // Added in 7.0
180 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
181 Name.startswith("avx2.pmax") || // Added in 3.9
182 Name.startswith("avx2.pmin") || // Added in 3.9
183 Name.startswith("avx512.mask.pmax") || // Added in 4.0
184 Name.startswith("avx512.mask.pmin") || // Added in 4.0
185 Name.startswith("avx2.vbroadcast") || // Added in 3.8
186 Name.startswith("avx2.pbroadcast") || // Added in 3.8
187 Name.startswith("avx.vpermil.") || // Added in 3.1
188 Name.startswith("sse2.pshuf") || // Added in 3.9
189 Name.startswith("avx512.pbroadcast") || // Added in 3.9
190 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
191 Name.startswith("avx512.mask.movddup") || // Added in 3.9
192 Name.startswith("avx512.mask.movshdup") || // Added in 3.9
193 Name.startswith("avx512.mask.movsldup") || // Added in 3.9
194 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
195 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
196 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
197 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
198 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
199 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
200 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
201 Name.startswith("avx512.mask.punpckl") || // Added in 3.9
202 Name.startswith("avx512.mask.punpckh") || // Added in 3.9
203 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
204 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
205 Name.startswith("avx512.mask.pand.") || // Added in 3.9
206 Name.startswith("avx512.mask.pandn.") || // Added in 3.9
207 Name.startswith("avx512.mask.por.") || // Added in 3.9
208 Name.startswith("avx512.mask.pxor.") || // Added in 3.9
209 Name.startswith("avx512.mask.and.") || // Added in 3.9
210 Name.startswith("avx512.mask.andn.") || // Added in 3.9
211 Name.startswith("avx512.mask.or.") || // Added in 3.9
212 Name.startswith("avx512.mask.xor.") || // Added in 3.9
213 Name.startswith("avx512.mask.padd.") || // Added in 4.0
214 Name.startswith("avx512.mask.psub.") || // Added in 4.0
215 Name.startswith("avx512.mask.pmull.") || // Added in 4.0
216 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
217 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
218 Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
219 Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
220 Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
221 Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
222 Name == "avx512.mask.vcvtph2ps.128" || // Added in 11.0
223 Name == "avx512.mask.vcvtph2ps.256" || // Added in 11.0
224 Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
225 Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
226 Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
227 Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
228 Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
229 Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
230 Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
231 Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
232 Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
233 Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
234 Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
235 Name == "avx512.cvtusi2sd" || // Added in 7.0
236 Name.startswith("avx512.mask.permvar.") || // Added in 7.0
237 Name == "sse2.pmulu.dq" || // Added in 7.0
238 Name == "sse41.pmuldq" || // Added in 7.0
239 Name == "avx2.pmulu.dq" || // Added in 7.0
240 Name == "avx2.pmul.dq" || // Added in 7.0
241 Name == "avx512.pmulu.dq.512" || // Added in 7.0
242 Name == "avx512.pmul.dq.512" || // Added in 7.0
243 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
244 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
245 Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
246 Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
247 Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
248 Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
249 Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
250 Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
251 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
252 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
253 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
254 Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
255 Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
256 Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
257 Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
258 Name.startswith("avx512.cmp.p") || // Added in 12.0
259 Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
260 Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
261 Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
262 Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
263 Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
264 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
265 Name.startswith("avx512.mask.psll.d") || // Added in 4.0
266 Name.startswith("avx512.mask.psll.q") || // Added in 4.0
267 Name.startswith("avx512.mask.psll.w") || // Added in 4.0
268 Name.startswith("avx512.mask.psra.d") || // Added in 4.0
269 Name.startswith("avx512.mask.psra.q") || // Added in 4.0
270 Name.startswith("avx512.mask.psra.w") || // Added in 4.0
271 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
272 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
273 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
274 Name.startswith("avx512.mask.pslli") || // Added in 4.0
275 Name.startswith("avx512.mask.psrai") || // Added in 4.0
276 Name.startswith("avx512.mask.psrli") || // Added in 4.0
277 Name.startswith("avx512.mask.psllv") || // Added in 4.0
278 Name.startswith("avx512.mask.psrav") || // Added in 4.0
279 Name.startswith("avx512.mask.psrlv") || // Added in 4.0
280 Name.startswith("sse41.pmovsx") || // Added in 3.8
281 Name.startswith("sse41.pmovzx") || // Added in 3.9
282 Name.startswith("avx2.pmovsx") || // Added in 3.9
283 Name.startswith("avx2.pmovzx") || // Added in 3.9
284 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
285 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
286 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
287 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
288 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
289 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
290 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
291 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
292 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
293 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
294 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
295 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
296 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
297 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
298 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
299 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
300 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
301 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
302 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
303 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
304 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
305 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
306 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
307 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
308 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
309 Name.startswith("avx512.vpshld.") || // Added in 8.0
310 Name.startswith("avx512.vpshrd.") || // Added in 8.0
311 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
312 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
313 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
314 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
315 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
316 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
317 Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
318 Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
319 Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
320 Name.startswith("avx512.mask.conflict.") || // Added in 9.0
321 Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
322 Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
323 Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
324 Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
325 Name == "sse.cvtsi2ss" || // Added in 7.0
326 Name == "sse.cvtsi642ss" || // Added in 7.0
327 Name == "sse2.cvtsi2sd" || // Added in 7.0
328 Name == "sse2.cvtsi642sd" || // Added in 7.0
329 Name == "sse2.cvtss2sd" || // Added in 7.0
330 Name == "sse2.cvtdq2pd" || // Added in 3.9
331 Name == "sse2.cvtdq2ps" || // Added in 7.0
332 Name == "sse2.cvtps2pd" || // Added in 3.9
333 Name == "avx.cvtdq2.pd.256" || // Added in 3.9
334 Name == "avx.cvtdq2.ps.256" || // Added in 7.0
335 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
336 Name.startswith("vcvtph2ps.") || // Added in 11.0
337 Name.startswith("avx.vinsertf128.") || // Added in 3.7
338 Name == "avx2.vinserti128" || // Added in 3.7
339 Name.startswith("avx512.mask.insert") || // Added in 4.0
340 Name.startswith("avx.vextractf128.") || // Added in 3.7
341 Name == "avx2.vextracti128" || // Added in 3.7
342 Name.startswith("avx512.mask.vextract") || // Added in 4.0
343 Name.startswith("sse4a.movnt.") || // Added in 3.9
344 Name.startswith("avx.movnt.") || // Added in 3.2
345 Name.startswith("avx512.storent.") || // Added in 3.9
346 Name == "sse41.movntdqa" || // Added in 5.0
347 Name == "avx2.movntdqa" || // Added in 5.0
348 Name == "avx512.movntdqa" || // Added in 5.0
349 Name == "sse2.storel.dq" || // Added in 3.9
350 Name.startswith("sse.storeu.") || // Added in 3.9
351 Name.startswith("sse2.storeu.") || // Added in 3.9
352 Name.startswith("avx.storeu.") || // Added in 3.9
353 Name.startswith("avx512.mask.storeu.") || // Added in 3.9
354 Name.startswith("avx512.mask.store.p") || // Added in 3.9
355 Name.startswith("avx512.mask.store.b.") || // Added in 3.9
356 Name.startswith("avx512.mask.store.w.") || // Added in 3.9
357 Name.startswith("avx512.mask.store.d.") || // Added in 3.9
358 Name.startswith("avx512.mask.store.q.") || // Added in 3.9
359 Name == "avx512.mask.store.ss" || // Added in 7.0
360 Name.startswith("avx512.mask.loadu.") || // Added in 3.9
361 Name.startswith("avx512.mask.load.") || // Added in 3.9
362 Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
363 Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
364 Name.startswith("avx512.mask.expand.b") || // Added in 9.0
365 Name.startswith("avx512.mask.expand.w") || // Added in 9.0
366 Name.startswith("avx512.mask.expand.d") || // Added in 9.0
367 Name.startswith("avx512.mask.expand.q") || // Added in 9.0
368 Name.startswith("avx512.mask.expand.p") || // Added in 9.0
369 Name.startswith("avx512.mask.compress.b") || // Added in 9.0
370 Name.startswith("avx512.mask.compress.w") || // Added in 9.0
371 Name.startswith("avx512.mask.compress.d") || // Added in 9.0
372 Name.startswith("avx512.mask.compress.q") || // Added in 9.0
373 Name.startswith("avx512.mask.compress.p") || // Added in 9.0
374 Name == "sse42.crc32.64.8" || // Added in 3.4
375 Name.startswith("avx.vbroadcast.s") || // Added in 3.5
376 Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
377 Name.startswith("avx512.mask.palignr.") || // Added in 3.9
378 Name.startswith("avx512.mask.valign.") || // Added in 4.0
379 Name.startswith("sse2.psll.dq") || // Added in 3.7
380 Name.startswith("sse2.psrl.dq") || // Added in 3.7
381 Name.startswith("avx2.psll.dq") || // Added in 3.7
382 Name.startswith("avx2.psrl.dq") || // Added in 3.7
383 Name.startswith("avx512.psll.dq") || // Added in 3.9
384 Name.startswith("avx512.psrl.dq") || // Added in 3.9
385 Name == "sse41.pblendw" || // Added in 3.7
386 Name.startswith("sse41.blendp") || // Added in 3.7
387 Name.startswith("avx.blend.p") || // Added in 3.7
388 Name == "avx2.pblendw" || // Added in 3.7
389 Name.startswith("avx2.pblendd.") || // Added in 3.7
390 Name.startswith("avx.vbroadcastf128") || // Added in 4.0
391 Name == "avx2.vbroadcasti128" || // Added in 3.7
392 Name.startswith("avx512.mask.broadcastf32x4.") || // Added in 6.0
393 Name.startswith("avx512.mask.broadcastf64x2.") || // Added in 6.0
394 Name.startswith("avx512.mask.broadcastf32x8.") || // Added in 6.0
395 Name.startswith("avx512.mask.broadcastf64x4.") || // Added in 6.0
396 Name.startswith("avx512.mask.broadcasti32x4.") || // Added in 6.0
397 Name.startswith("avx512.mask.broadcasti64x2.") || // Added in 6.0
398 Name.startswith("avx512.mask.broadcasti32x8.") || // Added in 6.0
399 Name.startswith("avx512.mask.broadcasti64x4.") || // Added in 6.0
400 Name == "xop.vpcmov" || // Added in 3.8
401 Name == "xop.vpcmov.256" || // Added in 5.0
402 Name.startswith("avx512.mask.move.s") || // Added in 4.0
403 Name.startswith("avx512.cvtmask2") || // Added in 5.0
404 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
405 Name.startswith("xop.vprot") || // Added in 8.0
406 Name.startswith("avx512.prol") || // Added in 8.0
407 Name.startswith("avx512.pror") || // Added in 8.0
408 Name.startswith("avx512.mask.prorv.") || // Added in 8.0
409 Name.startswith("avx512.mask.pror.") || // Added in 8.0
410 Name.startswith("avx512.mask.prolv.") || // Added in 8.0
411 Name.startswith("avx512.mask.prol.") || // Added in 8.0
412 Name.startswith("avx512.ptestm") || // Added in 6.0
413 Name.startswith("avx512.ptestnm") || // Added in 6.0
414 Name.startswith("avx512.mask.pavg")) // Added in 6.0
415 return true;
416
417 return false;
418}
419
420static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
421 Function *&NewFn) {
422 // Only handle intrinsics that start with "x86.".
423 if (!Name.startswith("x86."))
424 return false;
425 // Remove "x86." prefix.
426 Name = Name.substr(4);
427
428 if (ShouldUpgradeX86Intrinsic(F, Name)) {
429 NewFn = nullptr;
430 return true;
431 }
432
433 if (Name == "rdtscp") { // Added in 8.0
434 // If this intrinsic has 0 operands, it's the new version.
435 if (F->getFunctionType()->getNumParams() == 0)
436 return false;
437
438 rename(F);
439 NewFn = Intrinsic::getDeclaration(F->getParent(),
440 Intrinsic::x86_rdtscp);
441 return true;
442 }
443
444 // SSE4.1 ptest functions may have an old signature.
445 if (Name.startswith("sse41.ptest")) { // Added in 3.2
446 if (Name.substr(11) == "c")
447 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
448 if (Name.substr(11) == "z")
449 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
450 if (Name.substr(11) == "nzc")
451 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
452 }
453 // Several blend and other instructions with masks used the wrong number of
454 // bits.
455 if (Name == "sse41.insertps") // Added in 3.6
456 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
457 NewFn);
458 if (Name == "sse41.dppd") // Added in 3.6
459 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
460 NewFn);
461 if (Name == "sse41.dpps") // Added in 3.6
462 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
463 NewFn);
464 if (Name == "sse41.mpsadbw") // Added in 3.6
465 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
466 NewFn);
467 if (Name == "avx.dp.ps.256") // Added in 3.6
468 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
469 NewFn);
470 if (Name == "avx2.mpsadbw") // Added in 3.6
471 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
472 NewFn);
473 if (Name == "avx512.mask.cmp.pd.128") // Added in 7.0
474 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_128,
475 NewFn);
476 if (Name == "avx512.mask.cmp.pd.256") // Added in 7.0
477 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_256,
478 NewFn);
479 if (Name == "avx512.mask.cmp.pd.512") // Added in 7.0
480 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_512,
481 NewFn);
482 if (Name == "avx512.mask.cmp.ps.128") // Added in 7.0
483 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_128,
484 NewFn);
485 if (Name == "avx512.mask.cmp.ps.256") // Added in 7.0
486 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_256,
487 NewFn);
488 if (Name == "avx512.mask.cmp.ps.512") // Added in 7.0
489 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_512,
490 NewFn);
491
492 // frcz.ss/sd may need to have an argument dropped. Added in 3.2
493 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
494 rename(F);
495 NewFn = Intrinsic::getDeclaration(F->getParent(),
496 Intrinsic::x86_xop_vfrcz_ss);
497 return true;
498 }
499 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
500 rename(F);
501 NewFn = Intrinsic::getDeclaration(F->getParent(),
502 Intrinsic::x86_xop_vfrcz_sd);
503 return true;
504 }
505 // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
506 if (Name.startswith("xop.vpermil2")) { // Added in 3.9
507 auto Idx = F->getFunctionType()->getParamType(2);
508 if (Idx->isFPOrFPVectorTy()) {
509 rename(F);
510 unsigned IdxSize = Idx->getPrimitiveSizeInBits();
511 unsigned EltSize = Idx->getScalarSizeInBits();
512 Intrinsic::ID Permil2ID;
513 if (EltSize == 64 && IdxSize == 128)
514 Permil2ID = Intrinsic::x86_xop_vpermil2pd;
515 else if (EltSize == 32 && IdxSize == 128)
516 Permil2ID = Intrinsic::x86_xop_vpermil2ps;
517 else if (EltSize == 64 && IdxSize == 256)
518 Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
519 else
520 Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
521 NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
522 return true;
523 }
524 }
525
526 if (Name == "seh.recoverfp") {
527 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
528 return true;
529 }
530
531 return false;
532}
533
534static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
535 assert(F && "Illegal to upgrade a non-existent Function.");
536
537 // Quickly eliminate it, if it's not a candidate.
538 StringRef Name = F->getName();
539 if (Name.size() <= 8 || !Name.startswith("llvm."))
540 return false;
541 Name = Name.substr(5); // Strip off "llvm."
542
543 switch (Name[0]) {
544 default: break;
545 case 'a': {
546 if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
547 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
548 F->arg_begin()->getType());
549 return true;
550 }
551 if (Name.startswith("aarch64.neon.frintn")) {
552 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::roundeven,
553 F->arg_begin()->getType());
554 return true;
555 }
556 if (Name.startswith("aarch64.neon.rbit")) {
557 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
558 F->arg_begin()->getType());
559 return true;
560 }
561 static const Regex LdRegex("^aarch64\\.sve\\.ld[234](.nxv[a-z0-9]+|$)");
562 if (LdRegex.match(Name)) {
563 Type *ScalarTy =
564 dyn_cast<VectorType>(F->getReturnType())->getElementType();
565 ElementCount EC =
566 dyn_cast<VectorType>(F->arg_begin()->getType())->getElementCount();
567 Type *Ty = VectorType::get(ScalarTy, EC);
568 Intrinsic::ID ID =
569 StringSwitch<Intrinsic::ID>(Name)
570 .StartsWith("aarch64.sve.ld2", Intrinsic::aarch64_sve_ld2_sret)
571 .StartsWith("aarch64.sve.ld3", Intrinsic::aarch64_sve_ld3_sret)
572 .StartsWith("aarch64.sve.ld4", Intrinsic::aarch64_sve_ld4_sret)
573 .Default(Intrinsic::not_intrinsic);
574 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Ty);
575 return true;
576 }
577 if (Name.startswith("aarch64.sve.tuple.get")) {
578 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
579 NewFn = Intrinsic::getDeclaration(F->getParent(),
580 Intrinsic::vector_extract, Tys);
581 return true;
582 }
583 if (Name.startswith("aarch64.sve.tuple.set")) {
584 auto Args = F->getFunctionType()->params();
585 Type *Tys[] = {Args[0], Args[2], Args[1]};
586 NewFn = Intrinsic::getDeclaration(F->getParent(),
587 Intrinsic::vector_insert, Tys);
588 return true;
589 }
590 static const Regex CreateTupleRegex(
591 "^aarch64\\.sve\\.tuple\\.create[234](.nxv[a-z0-9]+|$)");
592 if (CreateTupleRegex.match(Name)) {
593 auto Args = F->getFunctionType()->params();
594 Type *Tys[] = {F->getReturnType(), Args[1]};
595 NewFn = Intrinsic::getDeclaration(F->getParent(),
596 Intrinsic::vector_insert, Tys);
597 return true;
598 }
599 if (Name.startswith("arm.neon.vclz")) {
600 Type* args[2] = {
601 F->arg_begin()->getType(),
602 Type::getInt1Ty(F->getContext())
603 };
604 // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
605 // the end of the name. Change name from llvm.arm.neon.vclz.* to
606 // llvm.ctlz.*
607 FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
608 NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
609 "llvm.ctlz." + Name.substr(14), F->getParent());
610 return true;
611 }
612 if (Name.startswith("arm.neon.vcnt")) {
613 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
614 F->arg_begin()->getType());
615 return true;
616 }
617 static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
618 if (vstRegex.match(Name)) {
619 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
620 Intrinsic::arm_neon_vst2,
621 Intrinsic::arm_neon_vst3,
622 Intrinsic::arm_neon_vst4};
623
624 static const Intrinsic::ID StoreLaneInts[] = {
625 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
626 Intrinsic::arm_neon_vst4lane
627 };
628
629 auto fArgs = F->getFunctionType()->params();
630 Type *Tys[] = {fArgs[0], fArgs[1]};
631 if (!Name.contains("lane"))
632 NewFn = Intrinsic::getDeclaration(F->getParent(),
633 StoreInts[fArgs.size() - 3], Tys);
634 else
635 NewFn = Intrinsic::getDeclaration(F->getParent(),
636 StoreLaneInts[fArgs.size() - 5], Tys);
637 return true;
638 }
639 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
640 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
641 return true;
642 }
643 if (Name.startswith("arm.neon.vqadds.")) {
644 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
645 F->arg_begin()->getType());
646 return true;
647 }
648 if (Name.startswith("arm.neon.vqaddu.")) {
649 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
650 F->arg_begin()->getType());
651 return true;
652 }
653 if (Name.startswith("arm.neon.vqsubs.")) {
654 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
655 F->arg_begin()->getType());
656 return true;
657 }
658 if (Name.startswith("arm.neon.vqsubu.")) {
659 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
660 F->arg_begin()->getType());
661 return true;
662 }
663 if (Name.startswith("aarch64.neon.addp")) {
664 if (F->arg_size() != 2)
665 break; // Invalid IR.
666 VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
667 if (Ty && Ty->getElementType()->isFloatingPointTy()) {
668 NewFn = Intrinsic::getDeclaration(F->getParent(),
669 Intrinsic::aarch64_neon_faddp, Ty);
670 return true;
671 }
672 }
673
674 // Changed in 12.0: the bfdot intrinsics accept v4bf16 and v8bf16 instead
675 // of v8i8 and v16i8, respectively.
676 if ((Name.startswith("arm.neon.bfdot.") ||
677 Name.startswith("aarch64.neon.bfdot.")) &&
678 Name.endswith("i8")) {
679 Intrinsic::ID IID =
680 StringSwitch<Intrinsic::ID>(Name)
681 .Cases("arm.neon.bfdot.v2f32.v8i8",
682 "arm.neon.bfdot.v4f32.v16i8",
683 Intrinsic::arm_neon_bfdot)
684 .Cases("aarch64.neon.bfdot.v2f32.v8i8",
685 "aarch64.neon.bfdot.v4f32.v16i8",
686 Intrinsic::aarch64_neon_bfdot)
687 .Default(Intrinsic::not_intrinsic);
688 if (IID == Intrinsic::not_intrinsic)
689 break;
690
691 size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
692 assert((OperandWidth == 64 || OperandWidth == 128) &&
693        "Unexpected operand width");
694 LLVMContext &Ctx = F->getParent()->getContext();
695 std::array<Type *, 2> Tys {{
696 F->getReturnType(),
697 FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)
698 }};
699 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
700 return true;
701 }
702
703 // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic anymore
704 // and accept v8bf16 instead of v16i8
705 if ((Name.startswith("arm.neon.bfm") ||
706 Name.startswith("aarch64.neon.bfm")) &&
707 Name.endswith(".v4f32.v16i8")) {
708 Intrinsic::ID IID =
709 StringSwitch<Intrinsic::ID>(Name)
710 .Case("arm.neon.bfmmla.v4f32.v16i8",
711 Intrinsic::arm_neon_bfmmla)
712 .Case("arm.neon.bfmlalb.v4f32.v16i8",
713 Intrinsic::arm_neon_bfmlalb)
714 .Case("arm.neon.bfmlalt.v4f32.v16i8",
715 Intrinsic::arm_neon_bfmlalt)
716 .Case("aarch64.neon.bfmmla.v4f32.v16i8",
717 Intrinsic::aarch64_neon_bfmmla)
718 .Case("aarch64.neon.bfmlalb.v4f32.v16i8",
719 Intrinsic::aarch64_neon_bfmlalb)
720 .Case("aarch64.neon.bfmlalt.v4f32.v16i8",
721 Intrinsic::aarch64_neon_bfmlalt)
722 .Default(Intrinsic::not_intrinsic);
723 if (IID == Intrinsic::not_intrinsic)
724 break;
725
726 std::array<Type *, 0> Tys;
727 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
728 return true;
729 }
730
731 if (Name == "arm.mve.vctp64" &&
732 cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
733 // A vctp64 returning a v4i1 is converted to return a v2i1. Rename the
734 // function and deal with it below in UpgradeIntrinsicCall.
735 rename(F);
736 return true;
737 }
738 // These too are changed to accept a v2i1 instead of the old v4i1.
739 if (Name == "arm.mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
740 Name == "arm.mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
741 Name == "arm.mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
742 Name == "arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
743 Name == "arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
744 Name == "arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
745 Name == "arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
746 Name == "arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
747 Name == "arm.cde.vcx1q.predicated.v2i64.v4i1" ||
748 Name == "arm.cde.vcx1qa.predicated.v2i64.v4i1" ||
749 Name == "arm.cde.vcx2q.predicated.v2i64.v4i1" ||
750 Name == "arm.cde.vcx2qa.predicated.v2i64.v4i1" ||
751 Name == "arm.cde.vcx3q.predicated.v2i64.v4i1" ||
752 Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1")
753 return true;
754
755 if (Name == "amdgcn.alignbit") {
756 // Target specific intrinsic became redundant
757 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
758 {F->getReturnType()});
759 return true;
760 }
761
762 break;
763 }
764
765 case 'c': {
766 if (Name.startswith("ctlz.") && F->arg_size() == 1) {
767 rename(F);
768 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
769 F->arg_begin()->getType());
770 return true;
771 }
772 if (Name.startswith("cttz.") && F->arg_size() == 1) {
773 rename(F);
774 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
775 F->arg_begin()->getType());
776 return true;
777 }
778 break;
779 }
780 case 'd': {
781 if (Name == "dbg.value" && F->arg_size() == 4) {
782 rename(F);
783 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
784 return true;
785 }
786 break;
787 }
788 case 'e': {
789 if (Name.startswith("experimental.vector.extract.")) {
790 rename(F);
791 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
792 NewFn = Intrinsic::getDeclaration(F->getParent(),
793 Intrinsic::vector_extract, Tys);
794 return true;
795 }
796
797 if (Name.startswith("experimental.vector.insert.")) {
798 rename(F);
799 auto Args = F->getFunctionType()->params();
800 Type *Tys[] = {Args[0], Args[1]};
801 NewFn = Intrinsic::getDeclaration(F->getParent(),
802 Intrinsic::vector_insert, Tys);
803 return true;
804 }
805
806 SmallVector<StringRef, 2> Groups;
807 static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+");
808 if (R.match(Name, &Groups)) {
809 Intrinsic::ID ID;
810 ID = StringSwitch<Intrinsic::ID>(Groups[1])
811 .Case("add", Intrinsic::vector_reduce_add)
812 .Case("mul", Intrinsic::vector_reduce_mul)
813 .Case("and", Intrinsic::vector_reduce_and)
814 .Case("or", Intrinsic::vector_reduce_or)
815 .Case("xor", Intrinsic::vector_reduce_xor)
816 .Case("smax", Intrinsic::vector_reduce_smax)
817 .Case("smin", Intrinsic::vector_reduce_smin)
818 .Case("umax", Intrinsic::vector_reduce_umax)
819 .Case("umin", Intrinsic::vector_reduce_umin)
820 .Case("fmax", Intrinsic::vector_reduce_fmax)
821 .Case("fmin", Intrinsic::vector_reduce_fmin)
822 .Default(Intrinsic::not_intrinsic);
823 if (ID != Intrinsic::not_intrinsic) {
824 rename(F);
825 auto Args = F->getFunctionType()->params();
826 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, {Args[0]});
827 return true;
828 }
829 }
830 static const Regex R2(
831 "^experimental.vector.reduce.v2.([a-z]+)\\.[fi][0-9]+");
832 Groups.clear();
833 if (R2.match(Name, &Groups)) {
834 Intrinsic::ID ID = Intrinsic::not_intrinsic;
835 if (Groups[1] == "fadd")
836 ID = Intrinsic::vector_reduce_fadd;
837 if (Groups[1] == "fmul")
838 ID = Intrinsic::vector_reduce_fmul;
839 if (ID != Intrinsic::not_intrinsic) {
840 rename(F);
841 auto Args = F->getFunctionType()->params();
842 Type *Tys[] = {Args[1]};
843 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
844 return true;
845 }
846 }
847 break;
848 }
849 case 'i':
850 case 'l': {
851 bool IsLifetimeStart = Name.startswith("lifetime.start");
852 if (IsLifetimeStart || Name.startswith("invariant.start")) {
853 Intrinsic::ID ID = IsLifetimeStart ?
854 Intrinsic::lifetime_start : Intrinsic::invariant_start;
855 auto Args = F->getFunctionType()->params();
856 Type* ObjectPtr[1] = {Args[1]};
857 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
858 rename(F);
859 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
860 return true;
861 }
862 }
863
864 bool IsLifetimeEnd = Name.startswith("lifetime.end");
865 if (IsLifetimeEnd || Name.startswith("invariant.end")) {
866 Intrinsic::ID ID = IsLifetimeEnd ?
867 Intrinsic::lifetime_end : Intrinsic::invariant_end;
868
869 auto Args = F->getFunctionType()->params();
870 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
871 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
872 rename(F);
873 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
874 return true;
875 }
876 }
877 if (Name.startswith("invariant.group.barrier")) {
878 // Rename invariant.group.barrier to launder.invariant.group
879 auto Args = F->getFunctionType()->params();
880 Type* ObjectPtr[1] = {Args[0]};
881 rename(F);
882 NewFn = Intrinsic::getDeclaration(F->getParent(),
883 Intrinsic::launder_invariant_group, ObjectPtr);
884 return true;
885
886 }
887
888 break;
889 }
890 case 'm': {
891 if (Name.startswith("masked.load.")) {
892 Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
893 if (F->getName() !=
894 Intrinsic::getName(Intrinsic::masked_load, Tys, F->getParent())) {
895 rename(F);
896 NewFn = Intrinsic::getDeclaration(F->getParent(),
897 Intrinsic::masked_load,
898 Tys);
899 return true;
900 }
901 }
902 if (Name.startswith("masked.store.")) {
903 auto Args = F->getFunctionType()->params();
904 Type *Tys[] = { Args[0], Args[1] };
905 if (F->getName() !=
906 Intrinsic::getName(Intrinsic::masked_store, Tys, F->getParent())) {
907 rename(F);
908 NewFn = Intrinsic::getDeclaration(F->getParent(),
909 Intrinsic::masked_store,
910 Tys);
911 return true;
912 }
913 }
914 // Renaming gather/scatter intrinsics with no address space overloading
915 // to the new overload, which includes an address space.
916 if (Name.startswith("masked.gather.")) {
917 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
918 if (F->getName() !=
919 Intrinsic::getName(Intrinsic::masked_gather, Tys, F->getParent())) {
920 rename(F);
921 NewFn = Intrinsic::getDeclaration(F->getParent(),
922 Intrinsic::masked_gather, Tys);
923 return true;
924 }
925 }
926 if (Name.startswith("masked.scatter.")) {
927 auto Args = F->getFunctionType()->params();
928 Type *Tys[] = {Args[0], Args[1]};
929 if (F->getName() !=
930 Intrinsic::getName(Intrinsic::masked_scatter, Tys, F->getParent())) {
931 rename(F);
932 NewFn = Intrinsic::getDeclaration(F->getParent(),
933 Intrinsic::masked_scatter, Tys);
934 return true;
935 }
936 }
937 // Update the memory intrinsics (memcpy/memmove/memset) that have an
938 // alignment parameter to embed the alignment as an attribute of
939 // the pointer args.
940 if (Name.startswith("memcpy.") && F->arg_size() == 5) {
941 rename(F);
942 // Get the types of dest, src, and len
943 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
944 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
945 ParamTypes);
946 return true;
947 }
948 if (Name.startswith("memmove.") && F->arg_size() == 5) {
949 rename(F);
950 // Get the types of dest, src, and len
951 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
952 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
953 ParamTypes);
954 return true;
955 }
956 if (Name.startswith("memset.") && F->arg_size() == 5) {
957 rename(F);
958 // Get the types of dest, and len
959 const auto *FT = F->getFunctionType();
960 Type *ParamTypes[2] = {
961 FT->getParamType(0), // Dest
962 FT->getParamType(2) // len
963 };
964 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
965 ParamTypes);
966 return true;
967 }
968 break;
969 }
970 case 'n': {
971 if (Name.startswith("nvvm.")) {
972 Name = Name.substr(5);
973
974 // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
975 Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
976 .Cases("brev32", "brev64", Intrinsic::bitreverse)
977 .Case("clz.i", Intrinsic::ctlz)
978 .Case("popc.i", Intrinsic::ctpop)
979 .Default(Intrinsic::not_intrinsic);
980 if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
981 NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
982 {F->getReturnType()});
983 return true;
984 }
985
986 // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
987 // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
988 //
989 // TODO: We could add lohi.i2d.
990 bool Expand = StringSwitch<bool>(Name)
991 .Cases("abs.i", "abs.ll", true)
992 .Cases("clz.ll", "popc.ll", "h2f", true)
993 .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
994 .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
995 .StartsWith("atomic.load.add.f32.p", true)
996 .StartsWith("atomic.load.add.f64.p", true)
997 .Default(false);
998 if (Expand) {
999 NewFn = nullptr;
1000 return true;
1001 }
1002 }
1003 break;
1004 }
1005 case 'o':
1006 // We only need to change the name to match the mangling, including the
1007 // address space.
1008 if (Name.startswith("objectsize.")) {
1009 Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
1010 if (F->arg_size() == 2 || F->arg_size() == 3 ||
1011 F->getName() !=
1012 Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
1013 rename(F);
1014 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
1015 Tys);
1016 return true;
1017 }
1018 }
1019 break;
1020
1021 case 'p':
1022 if (Name == "prefetch") {
1023 // Handle address space overloading.
1024 Type *Tys[] = {F->arg_begin()->getType()};
1025 if (F->getName() !=
1026 Intrinsic::getName(Intrinsic::prefetch, Tys, F->getParent())) {
1027 rename(F);
1028 NewFn =
1029 Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
1030 return true;
1031 }
1032 } else if (Name.startswith("ptr.annotation.") && F->arg_size() == 4) {
1033 rename(F);
1034 NewFn = Intrinsic::getDeclaration(F->getParent(),
1035 Intrinsic::ptr_annotation,
1036 F->arg_begin()->getType());
1037 return true;
1038 }
1039 break;
1040
1041 case 's':
1042 if (Name == "stackprotectorcheck") {
1043 NewFn = nullptr;
1044 return true;
1045 }
1046 break;
1047
1048 case 'v': {
1049 if (Name == "var.annotation" && F->arg_size() == 4) {
1050 rename(F);
1051 NewFn = Intrinsic::getDeclaration(F->getParent(),
1052 Intrinsic::var_annotation);
1053 return true;
1054 }
1055 break;
1056 }
1057
1058 case 'x':
1059 if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
1060 return true;
1061 }
1062
1063 auto *ST = dyn_cast<StructType>(F->getReturnType());
1064 if (ST && (!ST->isLiteral() || ST->isPacked())) {
1065 // Replace return type with literal non-packed struct. Only do this for
1066 // intrinsics declared to return a struct, not for intrinsics with
1067 // overloaded return type, in which case the exact struct type will be
1068 // mangled into the name.
1069 SmallVector<Intrinsic::IITDescriptor> Desc;
1070 Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
1071 if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
1072 auto *FT = F->getFunctionType();
1073 auto *NewST = StructType::get(ST->getContext(), ST->elements());
1074 auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
1075 std::string Name = F->getName().str();
1076 rename(F);
1077 NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
1078 Name, F->getParent());
1079
1080 // The new function may also need remangling.
1081 if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F))
1082 NewFn = *Result;
1083 return true;
1084 }
1085 }
1086
1087 // Remangle our intrinsic since we upgrade the mangling
1088 auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
1089 if (Result != None) {
1090 NewFn = *Result;
1091 return true;
1092 }
1093
1094 // This may not belong here. This function is effectively being overloaded
1095 // to both detect an intrinsic which needs upgrading, and to provide the
1096 // upgraded form of the intrinsic. We should perhaps have two separate
1097 // functions for this.
1098 return false;
1099}
1100
1101bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
1102 NewFn = nullptr;
1103 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
1104 assert(F != NewFn && "Intrinsic function upgraded to the same function");
1105
1106 // Upgrade intrinsic attributes. This does not change the function.
1107 if (NewFn)
1108 F = NewFn;
1109 if (Intrinsic::ID id = F->getIntrinsicID())
1110 F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
1111 return Upgraded;
1112}
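
A minimal sketch of the typical driver loop over a module's declarations (assumes a populated Module M; simplified relative to what the bitcode reader actually does):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/IR/AutoUpgrade.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void upgradeModuleIntrinsics(Module &M) {
      // UpgradeCallsToIntrinsic calls UpgradeIntrinsicFunction, rewrites
      // every call site, and erases the old declaration when a replacement
      // was created; early-inc iteration tolerates that erasure.
      for (Function &F : make_early_inc_range(M))
        if (F.hasName())
          UpgradeCallsToIntrinsic(&F);
    }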
1113
1114GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
1115 if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
1116 GV->getName() == "llvm.global_dtors")) ||
1117 !GV->hasInitializer())
1118 return nullptr;
1119 ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
1120 if (!ATy)
1121 return nullptr;
1122 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
1123 if (!STy || STy->getNumElements() != 2)
1124 return nullptr;
1125
1126 LLVMContext &C = GV->getContext();
1127 IRBuilder<> IRB(C);
1128 auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
1129 IRB.getInt8PtrTy());
1130 Constant *Init = GV->getInitializer();
1131 unsigned N = Init->getNumOperands();
1132 std::vector<Constant *> NewCtors(N);
1133 for (unsigned i = 0; i != N; ++i) {
1134 auto Ctor = cast<Constant>(Init->getOperand(i));
1135 NewCtors[i] = ConstantStruct::get(
1136 EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
1137 Constant::getNullValue(IRB.getInt8PtrTy()));
1138 }
1139 Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
1140
1141 return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
1142 NewInit, GV->getName());
1143}
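
An illustration of the rewrite this performs (a sketch; the IR below is hand-written to match the struct layout built above, not taken from a real module). Each llvm.global_ctors/llvm.global_dtors entry gains a third, null i8* "associated data" field:

    // before: @llvm.global_ctors = appending global
    //           [1 x { i32, void ()* }]
    //           [{ i32, void ()* } { i32 65535, void ()* @ctor }]
    // after:  @llvm.global_ctors = appending global
    //           [1 x { i32, void ()*, i8* }]
    //           [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]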
1144
1145// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
1146// to byte shuffles.
1147static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
1148 Value *Op, unsigned Shift) {
1149 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1150 unsigned NumElts = ResultTy->getNumElements() * 8;
1151
1152 // Bitcast from a 64-bit element type to a byte element type.
1153 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1154 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1155
1156 // We'll be shuffling in zeroes.
1157 Value *Res = Constant::getNullValue(VecTy);
1158
1159 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1160 // we'll just return the zero vector.
1161 if (Shift < 16) {
1162 int Idxs[64];
1163 // 256/512-bit version is split into 2/4 16-byte lanes.
1164 for (unsigned l = 0; l != NumElts; l += 16)
1165 for (unsigned i = 0; i != 16; ++i) {
1166 unsigned Idx = NumElts + i - Shift;
1167 if (Idx < NumElts)
1168 Idx -= NumElts - 16; // end of lane, switch operand.
1169 Idxs[l + i] = Idx + l;
1170 }
1171
1172 Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
1173 }
1174
1175 // Bitcast back to a 64-bit element type.
1176 return Builder.CreateBitCast(Res, ResultTy, "cast");
1177}
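
A standalone recomputation of the shuffle-index math above for one 128-bit lane (NumElts = 16 bytes, Shift = 4; values chosen for illustration, no LLVM dependency). Indices 0-15 select the zero vector and 16-31 select Op, matching CreateShuffleVector(Res, Op, Idxs):

    #include <cstdio>

    int main() {
      const unsigned NumElts = 16, Shift = 4;
      int Idxs[16];
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // no-op here; remaps lanes in 256/512-bit cases
        Idxs[i] = Idx;
      }
      // Prints 12 13 14 15 16 ... 27: four zero bytes shifted in at the
      // bottom, then bytes 0-11 of Op moved up -- a left byte shift by 4.
      for (int I : Idxs)
        printf("%d ", I);
      return 0;
    }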
1178
1179// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
1180// to byte shuffles.
1181static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
1182 unsigned Shift) {
1183 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1184 unsigned NumElts = ResultTy->getNumElements() * 8;
1185
1186 // Bitcast from a 64-bit element type to a byte element type.
1187 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1188 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1189
1190 // We'll be shuffling in zeroes.
1191 Value *Res = Constant::getNullValue(VecTy);
1192
1193 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1194 // we'll just return the zero vector.
1195 if (Shift < 16) {
1196 int Idxs[64];
1197 // 256/512-bit version is split into 2/4 16-byte lanes.
1198 for (unsigned l = 0; l != NumElts; l += 16)
1199 for (unsigned i = 0; i != 16; ++i) {
1200 unsigned Idx = i + Shift;
1201 if (Idx >= 16)
1202 Idx += NumElts - 16; // end of lane, switch operand.
1203 Idxs[l + i] = Idx + l;
1204 }
1205
1206 Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
1207 }
1208
1209 // Bitcast back to a 64-bit element type.
1210 return Builder.CreateBitCast(Res, ResultTy, "cast");
1211}
1212
1213static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
1214 unsigned NumElts) {
1215 assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
1216 llvm::VectorType *MaskTy = FixedVectorType::get(
1217 Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
1218 Mask = Builder.CreateBitCast(Mask, MaskTy);
1219
1220 // If we have less than 8 elements (1, 2 or 4), then the starting mask was an
1221 // i8 and we need to extract down to the right number of elements.
1222 if (NumElts <= 4) {
1223 int Indices[4];
1224 for (unsigned i = 0; i != NumElts; ++i)
1225 Indices[i] = i;
1226 Mask = Builder.CreateShuffleVector(
1227 Mask, Mask, makeArrayRef(Indices, NumElts), "extract");
1228 }
1229
1230 return Mask;
1231}
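
A scalar model of the mask extraction above for NumElts = 4 (standalone, no LLVM; the example mask value is arbitrary). The i8 mask is viewed as 8 booleans, bit i feeding lane i, and only the low 4 lanes survive the "extract" shuffle:

    #include <cstdio>

    int main() {
      const unsigned NumElts = 4;
      unsigned char Mask = 0xA6; // 0b10100110, arbitrary example
      bool Lanes[NumElts];
      for (unsigned i = 0; i != NumElts; ++i)
        Lanes[i] = (Mask >> i) & 1; // bitcast i8 -> <8 x i1>, keep low 4
      for (bool B : Lanes)
        printf("%d", B); // prints 0110 (bits 0..3 of 0xA6)
      return 0;
    }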
1232
1233static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
1234 Value *Op0, Value *Op1) {
1235  // If the mask is all ones, just emit the first operation.
1236 if (const auto *C = dyn_cast<Constant>(Mask))
1237 if (C->isAllOnesValue())
1238 return Op0;
1239
1240 Mask = getX86MaskVec(Builder, Mask,
1241 cast<FixedVectorType>(Op0->getType())->getNumElements());
1242 return Builder.CreateSelect(Mask, Op0, Op1);
1243}
1244
1245static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
1246 Value *Op0, Value *Op1) {
1247  // If the mask is all ones, just emit the first operation.
1248 if (const auto *C = dyn_cast<Constant>(Mask))
1249 if (C->isAllOnesValue())
1250 return Op0;
1251
1252 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
1253 Mask->getType()->getIntegerBitWidth());
1254 Mask = Builder.CreateBitCast(Mask, MaskTy);
1255 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
1256 return Builder.CreateSelect(Mask, Op0, Op1);
1257}
1258
1259// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics. PALIGNR
1260// handles large immediates by shifting, while VALIGN masks the immediate, so
1261// we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
1262static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
1263 Value *Op1, Value *Shift,
1264 Value *Passthru, Value *Mask,
1265 bool IsVALIGN) {
1266 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
1267
1268 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1269  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
1270  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
1271  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
1272
1273 // Mask the immediate for VALIGN.
1274 if (IsVALIGN)
1275 ShiftVal &= (NumElts - 1);
1276
1277 // If palignr is shifting the pair of vectors more than the size of two
1278 // lanes, emit zero.
1279 if (ShiftVal >= 32)
1280 return llvm::Constant::getNullValue(Op0->getType());
1281
1282 // If palignr is shifting the pair of input vectors more than one lane,
1283 // but less than two lanes, convert to shifting in zeroes.
1284 if (ShiftVal > 16) {
1285 ShiftVal -= 16;
1286 Op1 = Op0;
1287 Op0 = llvm::Constant::getNullValue(Op0->getType());
1288 }
1289
1290 int Indices[64];
1291  // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
1292 for (unsigned l = 0; l < NumElts; l += 16) {
1293 for (unsigned i = 0; i != 16; ++i) {
1294 unsigned Idx = ShiftVal + i;
1295 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1296 Idx += NumElts - 16; // End of lane, switch operand.
1297 Indices[l + i] = Idx + l;
1298 }
1299 }
1300
1301 Value *Align = Builder.CreateShuffleVector(Op1, Op0,
1302 makeArrayRef(Indices, NumElts),
1303 "palignr");
1304
1305 return EmitX86Select(Builder, Mask, Align, Passthru);
1306}
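
Same idea for the PALIGNR path, once ShiftVal has been reduced to at most 16 (sketch only):

#include <cstdio>

// For one 16-byte lane: indices < 16 read Op1 (the first shuffle operand),
// indices >= 16 wrap into Op0, matching palignr's concatenate-then-shift view.
static void printPalignrLaneIndices(unsigned NumElts, unsigned ShiftVal) {
  for (unsigned i = 0; i != 16; ++i) {
    unsigned Idx = ShiftVal + i;
    if (Idx >= 16)
      Idx += NumElts - 16; // End of lane: switch to Op0.
    std::printf("%u ", Idx);
  }
  std::printf("\n"); // NumElts = 16, ShiftVal = 4 -> "4 5 ... 15 16 17 18 19"
}

int main() { printPalignrLaneIndices(16, 4); }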
1307
1308static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
1309 bool ZeroMask, bool IndexForm) {
1310 Type *Ty = CI.getType();
1311 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1312 unsigned EltWidth = Ty->getScalarSizeInBits();
1313 bool IsFloat = Ty->isFPOrFPVectorTy();
1314 Intrinsic::ID IID;
1315 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1316 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1317 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1318 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1319 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1320 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1321 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1322 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1323 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1324 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1325 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1326 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1327 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1328 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1329 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1330 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1331 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1332 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1333 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1334 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1335 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1336 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1337 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1338 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1339 else if (VecWidth == 128 && EltWidth == 16)
1340 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1341 else if (VecWidth == 256 && EltWidth == 16)
1342 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1343 else if (VecWidth == 512 && EltWidth == 16)
1344 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1345 else if (VecWidth == 128 && EltWidth == 8)
1346 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1347 else if (VecWidth == 256 && EltWidth == 8)
1348 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1349 else if (VecWidth == 512 && EltWidth == 8)
1350 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1351 else
1352    llvm_unreachable("Unexpected intrinsic");
1353
1354 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1355 CI.getArgOperand(2) };
1356
1357  // If this isn't the index form, we need to swap operands 0 and 1.
1358 if (!IndexForm)
1359 std::swap(Args[0], Args[1]);
1360
1361 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1362 Args);
1363 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1364 : Builder.CreateBitCast(CI.getArgOperand(1),
1365 Ty);
1366 return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1367}
1368
1369static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
1370 Intrinsic::ID IID) {
1371 Type *Ty = CI.getType();
1372 Value *Op0 = CI.getOperand(0);
1373 Value *Op1 = CI.getOperand(1);
1374 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1375 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1376
1377 if (CI.arg_size() == 4) { // For masked intrinsics.
1378 Value *VecSrc = CI.getOperand(2);
1379 Value *Mask = CI.getOperand(3);
1380 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1381 }
1382 return Res;
1383}
1384
1385static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
1386 bool IsRotateRight) {
1387 Type *Ty = CI.getType();
1388 Value *Src = CI.getArgOperand(0);
1389 Value *Amt = CI.getArgOperand(1);
1390
1391  // The amount may be a scalar immediate, in which case we create a splat
1392  // vector. Funnel shift amounts are treated as modulo, and types are all
1393  // power-of-2, so we only care about the lowest log2 bits anyway.
1394 if (Amt->getType() != Ty) {
1395 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1396 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1397 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1398 }
1399
1400 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1401 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1402 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1403
1404 if (CI.arg_size() == 4) { // For masked intrinsics.
1405 Value *VecSrc = CI.getOperand(2);
1406 Value *Mask = CI.getOperand(3);
1407 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1408 }
1409 return Res;
1410}
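
Scalar model of the rotate-as-funnel-shift rewrite (standalone sketch): a rotate is a funnel shift with both inputs equal, and the amount is taken modulo the element width.

#include <cassert>
#include <cstdint>

// rotl(x, n) == fshl(x, x, n) for a 32-bit lane; masking the amount mirrors
// the modulo behaviour of the funnel-shift intrinsics.
static uint32_t rotl32(uint32_t X, uint32_t N) {
  N &= 31;
  return N ? (X << N) | (X >> (32 - N)) : X; // N == 0 guarded to avoid UB
}

int main() { assert(rotl32(0x80000001u, 1) == 0x00000003u); }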
1411
1412static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
1413 bool IsSigned) {
1414 Type *Ty = CI.getType();
1415 Value *LHS = CI.getArgOperand(0);
1416 Value *RHS = CI.getArgOperand(1);
1417
1418 CmpInst::Predicate Pred;
1419 switch (Imm) {
1420 case 0x0:
1421 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1422 break;
1423 case 0x1:
1424 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1425 break;
1426 case 0x2:
1427 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1428 break;
1429 case 0x3:
1430 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1431 break;
1432 case 0x4:
1433 Pred = ICmpInst::ICMP_EQ;
1434 break;
1435 case 0x5:
1436 Pred = ICmpInst::ICMP_NE;
1437 break;
1438 case 0x6:
1439 return Constant::getNullValue(Ty); // FALSE
1440 case 0x7:
1441 return Constant::getAllOnesValue(Ty); // TRUE
1442 default:
1443    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
1444 }
1445
1446 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1447 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1448 return Ext;
1449}
1450
1451static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
1452 bool IsShiftRight, bool ZeroMask) {
1453 Type *Ty = CI.getType();
1454 Value *Op0 = CI.getArgOperand(0);
1455 Value *Op1 = CI.getArgOperand(1);
1456 Value *Amt = CI.getArgOperand(2);
1457
1458 if (IsShiftRight)
1459 std::swap(Op0, Op1);
1460
1461  // The amount may be a scalar immediate, in which case we create a splat
1462  // vector. Funnel shift amounts are treated as modulo, and types are all
1463  // power-of-2, so we only care about the lowest log2 bits anyway.
1464 if (Amt->getType() != Ty) {
1465 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1466 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1467 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1468 }
1469
1470 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1471 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1472 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1473
1474 unsigned NumArgs = CI.arg_size();
1475 if (NumArgs >= 4) { // For masked intrinsics.
1476 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1477 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1478 CI.getArgOperand(0);
1479 Value *Mask = CI.getOperand(NumArgs - 1);
1480 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1481 }
1482 return Res;
1483}
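
The concat-shift (VSHLD/VSHRD) rewrite differs only in feeding two distinct operands to the funnel shift; a scalar sketch:

#include <cassert>
#include <cstdint>

// fshl(Hi, Lo, N) on a 32-bit lane: high bits come from Hi, the vacated low
// bits are pulled in from the top of Lo; fshl(Hi, Lo, 0) is just Hi.
static uint32_t fshl32(uint32_t Hi, uint32_t Lo, uint32_t N) {
  N &= 31;
  return N ? (Hi << N) | (Lo >> (32 - N)) : Hi;
}

int main() { assert(fshl32(0x00000001u, 0x80000000u, 1) == 0x00000003u); }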
1484
1485static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
1486 Value *Ptr, Value *Data, Value *Mask,
1487 bool Aligned) {
1488 // Cast the pointer to the right type.
1489 Ptr = Builder.CreateBitCast(Ptr,
1490 llvm::PointerType::getUnqual(Data->getType()));
1491 const Align Alignment =
1492 Aligned
1493 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedSize() / 8)
1494 : Align(1);
1495
1496  // If the mask is all ones, just emit a regular store.
1497 if (const auto *C = dyn_cast<Constant>(Mask))
1498 if (C->isAllOnesValue())
1499 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1500
1501 // Convert the mask from an integer type to a vector of i1.
1502 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1503 Mask = getX86MaskVec(Builder, Mask, NumElts);
1504 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1505}
1506
1507static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
1508 Value *Ptr, Value *Passthru, Value *Mask,
1509 bool Aligned) {
1510 Type *ValTy = Passthru->getType();
1511 // Cast the pointer to the right type.
1512 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1513 const Align Alignment =
1514 Aligned
1515 ? Align(Passthru->getType()->getPrimitiveSizeInBits().getFixedSize() /
1516 8)
1517 : Align(1);
1518
1519  // If the mask is all ones, just emit a regular load.
1520 if (const auto *C = dyn_cast<Constant>(Mask))
1521 if (C->isAllOnesValue())
1522 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1523
1524 // Convert the mask from an integer type to a vector of i1.
1525 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1526 Mask = getX86MaskVec(Builder, Mask, NumElts);
1527 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1528}
1529
1530static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
1531 Type *Ty = CI.getType();
1532 Value *Op0 = CI.getArgOperand(0);
1533 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1534 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1535 if (CI.arg_size() == 3)
1536 Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1537 return Res;
1538}
1539
1540static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
1541 Type *Ty = CI.getType();
1542
1543  // Arguments have a vXi32 type, so cast to vXi64.
1544 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1545 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1546
1547 if (IsSigned) {
1548 // Shift left then arithmetic shift right.
1549 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1550 LHS = Builder.CreateShl(LHS, ShiftAmt);
1551 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1552 RHS = Builder.CreateShl(RHS, ShiftAmt);
1553 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1554 } else {
1555 // Clear the upper bits.
1556 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1557 LHS = Builder.CreateAnd(LHS, Mask);
1558 RHS = Builder.CreateAnd(RHS, Mask);
1559 }
1560
1561 Value *Res = Builder.CreateMul(LHS, RHS);
1562
1563 if (CI.arg_size() == 4)
1564 Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1565
1566 return Res;
1567}
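
A scalar model of one 64-bit lane of the rewrite (standalone sketch):

#include <cassert>
#include <cstdint>

// PMULDQ/PMULUDQ per 64-bit lane: multiply the low 32 bits of each operand,
// sign-extended (the shl+ashr pair in the IR) or zero-extended (the
// 0xffffffff mask).
static uint64_t pmuldqLane(uint64_t A, uint64_t B, bool IsSigned) {
  if (IsSigned) {
    int64_t L = (int64_t)(A << 32) >> 32; // sign-extend low 32 bits
    int64_t R = (int64_t)(B << 32) >> 32;
    return (uint64_t)(L * R);
  }
  return (A & 0xffffffffu) * (B & 0xffffffffu);
}

int main() {
  assert(pmuldqLane(0xffffffffu, 2, true) == (uint64_t)-2);  // -1 * 2
  assert(pmuldqLane(0xffffffffu, 2, false) == 0x1fffffffeu); // 4294967295 * 2
}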
1568
1569// Apply a mask to a vector of i1s and make sure the result is at least 8 bits wide.
1570static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1571 Value *Mask) {
1572 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
[7] The object is a 'CastReturnType'
[8] 'NumElts' initialized here
1573  if (Mask) {
[8.1] 'Mask' is null
[9] Taking false branch
1574 const auto *C = dyn_cast<Constant>(Mask);
1575 if (!C || !C->isAllOnesValue())
1576 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1577 }
1578
1579 if (NumElts < 8) {
[10] Assuming 'NumElts' is < 8
[11] Taking true branch
1580 int Indices[8];
1581 for (unsigned i = 0; i != NumElts; ++i)
[12] Assuming 'i' is equal to 'NumElts'
[13] Loop condition is false. Execution continues on line 1583
1582 Indices[i] = i;
1583 for (unsigned i = NumElts; i != 8; ++i)
[14] Loop condition is true. Entering loop body
1584 Indices[i] = NumElts + i % NumElts;
[15] Division by zero
1585 Vec = Builder.CreateShuffleVector(Vec,
1586 Constant::getNullValue(Vec->getType()),
1587 Indices);
1588 }
1589 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1590}
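
The division flagged at line 1584 happens only if NumElts is 0, which the analyzer reaches by assuming the identity loop at line 1581 never executes, i.e. NumElts == 0. A hypothetical hardening sketch (not the in-tree fix) that would make the invariant explicit:

#include <cassert>

// Hypothetical: assert the invariant the modulo relies on before the padding
// loop, so NumElts == 0 (the analyzer's scenario) is ruled out explicitly.
static void padMaskIndices(int (&Indices)[8], unsigned NumElts) {
  assert(NumElts != 0 && "mask vector must have at least one element");
  for (unsigned i = NumElts; i != 8; ++i)
    Indices[i] = NumElts + i % NumElts; // well-defined once NumElts >= 1
}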
1591
1592static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
1593 unsigned CC, bool Signed) {
1594 Value *Op0 = CI.getArgOperand(0);
1595 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
[1] The object is a 'CastReturnType'
1596
1597 Value *Cmp;
1598 if (CC == 3) {
[2] Assuming 'CC' is not equal to 3
[3] Taking false branch
1599 Cmp = Constant::getNullValue(
1600 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1601 } else if (CC == 7) {
[4] Assuming 'CC' is equal to 7
[5] Taking true branch
1602 Cmp = Constant::getAllOnesValue(
1603 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1604 } else {
1605 ICmpInst::Predicate Pred;
1606 switch (CC) {
1607    default: llvm_unreachable("Unknown condition code");
1608 case 0: Pred = ICmpInst::ICMP_EQ; break;
1609 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1610 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1611 case 4: Pred = ICmpInst::ICMP_NE; break;
1612 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1613 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1614 }
1615 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1616 }
1617
1618 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1619
1620 return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
[6] Calling 'ApplyX86MaskOn1BitsVec'
1621}
1622
1623// Replace a masked intrinsic with an older unmasked intrinsic.
1624static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
1625 Intrinsic::ID IID) {
1626 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1627 Value *Rep = Builder.CreateCall(Intrin,
1628 { CI.getArgOperand(0), CI.getArgOperand(1) });
1629 return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1630}
1631
1632static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
1633 Value* A = CI.getArgOperand(0);
1634 Value* B = CI.getArgOperand(1);
1635 Value* Src = CI.getArgOperand(2);
1636 Value* Mask = CI.getArgOperand(3);
1637
1638 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1639 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1640 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1641 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1642 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1643 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1644}
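
Scalar model (standalone sketch) of the select being built:

#include <cstdint>

// Only bit 0 of the i8 mask participates: lane 0 of the result comes from B
// when the bit is set and from Src otherwise; A's other lanes pass through.
static double maskedMoveLane0(uint8_t Mask, double B0, double Src0) {
  return (Mask & 1) ? B0 : Src0;
}

int main() { return maskedMoveLane0(0x02, 1.0, 2.0) == 2.0 ? 0 : 1; }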
1645
1646
1647static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
1648 Value* Op = CI.getArgOperand(0);
1649 Type* ReturnOp = CI.getType();
1650 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1651 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1652 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1653}
1654
1655// Replace intrinsic with unmasked version and a select.
1656static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1657 CallBase &CI, Value *&Rep) {
1658 Name = Name.substr(12); // Remove avx512.mask.
1659
1660 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1661 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1662 Intrinsic::ID IID;
1663 if (Name.startswith("max.p")) {
1664 if (VecWidth == 128 && EltWidth == 32)
1665 IID = Intrinsic::x86_sse_max_ps;
1666 else if (VecWidth == 128 && EltWidth == 64)
1667 IID = Intrinsic::x86_sse2_max_pd;
1668 else if (VecWidth == 256 && EltWidth == 32)
1669 IID = Intrinsic::x86_avx_max_ps_256;
1670 else if (VecWidth == 256 && EltWidth == 64)
1671 IID = Intrinsic::x86_avx_max_pd_256;
1672 else
1673      llvm_unreachable("Unexpected intrinsic");
1674 } else if (Name.startswith("min.p")) {
1675 if (VecWidth == 128 && EltWidth == 32)
1676 IID = Intrinsic::x86_sse_min_ps;
1677 else if (VecWidth == 128 && EltWidth == 64)
1678 IID = Intrinsic::x86_sse2_min_pd;
1679 else if (VecWidth == 256 && EltWidth == 32)
1680 IID = Intrinsic::x86_avx_min_ps_256;
1681 else if (VecWidth == 256 && EltWidth == 64)
1682 IID = Intrinsic::x86_avx_min_pd_256;
1683 else
1684      llvm_unreachable("Unexpected intrinsic");
1685 } else if (Name.startswith("pshuf.b.")) {
1686 if (VecWidth == 128)
1687 IID = Intrinsic::x86_ssse3_pshuf_b_128;
1688 else if (VecWidth == 256)
1689 IID = Intrinsic::x86_avx2_pshuf_b;
1690 else if (VecWidth == 512)
1691 IID = Intrinsic::x86_avx512_pshuf_b_512;
1692 else
1693      llvm_unreachable("Unexpected intrinsic");
1694 } else if (Name.startswith("pmul.hr.sw.")) {
1695 if (VecWidth == 128)
1696 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
1697 else if (VecWidth == 256)
1698 IID = Intrinsic::x86_avx2_pmul_hr_sw;
1699 else if (VecWidth == 512)
1700 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
1701 else
1702      llvm_unreachable("Unexpected intrinsic");
1703 } else if (Name.startswith("pmulh.w.")) {
1704 if (VecWidth == 128)
1705 IID = Intrinsic::x86_sse2_pmulh_w;
1706 else if (VecWidth == 256)
1707 IID = Intrinsic::x86_avx2_pmulh_w;
1708 else if (VecWidth == 512)
1709 IID = Intrinsic::x86_avx512_pmulh_w_512;
1710 else
1711      llvm_unreachable("Unexpected intrinsic");
1712 } else if (Name.startswith("pmulhu.w.")) {
1713 if (VecWidth == 128)
1714 IID = Intrinsic::x86_sse2_pmulhu_w;
1715 else if (VecWidth == 256)
1716 IID = Intrinsic::x86_avx2_pmulhu_w;
1717 else if (VecWidth == 512)
1718 IID = Intrinsic::x86_avx512_pmulhu_w_512;
1719 else
1720      llvm_unreachable("Unexpected intrinsic");
1721 } else if (Name.startswith("pmaddw.d.")) {
1722 if (VecWidth == 128)
1723 IID = Intrinsic::x86_sse2_pmadd_wd;
1724 else if (VecWidth == 256)
1725 IID = Intrinsic::x86_avx2_pmadd_wd;
1726 else if (VecWidth == 512)
1727 IID = Intrinsic::x86_avx512_pmaddw_d_512;
1728 else
1729      llvm_unreachable("Unexpected intrinsic");
1730 } else if (Name.startswith("pmaddubs.w.")) {
1731 if (VecWidth == 128)
1732 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
1733 else if (VecWidth == 256)
1734 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
1735 else if (VecWidth == 512)
1736 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
1737 else
1738      llvm_unreachable("Unexpected intrinsic");
1739 } else if (Name.startswith("packsswb.")) {
1740 if (VecWidth == 128)
1741 IID = Intrinsic::x86_sse2_packsswb_128;
1742 else if (VecWidth == 256)
1743 IID = Intrinsic::x86_avx2_packsswb;
1744 else if (VecWidth == 512)
1745 IID = Intrinsic::x86_avx512_packsswb_512;
1746 else
1747      llvm_unreachable("Unexpected intrinsic");
1748 } else if (Name.startswith("packssdw.")) {
1749 if (VecWidth == 128)
1750 IID = Intrinsic::x86_sse2_packssdw_128;
1751 else if (VecWidth == 256)
1752 IID = Intrinsic::x86_avx2_packssdw;
1753 else if (VecWidth == 512)
1754 IID = Intrinsic::x86_avx512_packssdw_512;
1755 else
1756      llvm_unreachable("Unexpected intrinsic");
1757 } else if (Name.startswith("packuswb.")) {
1758 if (VecWidth == 128)
1759 IID = Intrinsic::x86_sse2_packuswb_128;
1760 else if (VecWidth == 256)
1761 IID = Intrinsic::x86_avx2_packuswb;
1762 else if (VecWidth == 512)
1763 IID = Intrinsic::x86_avx512_packuswb_512;
1764 else
1765      llvm_unreachable("Unexpected intrinsic");
1766 } else if (Name.startswith("packusdw.")) {
1767 if (VecWidth == 128)
1768 IID = Intrinsic::x86_sse41_packusdw;
1769 else if (VecWidth == 256)
1770 IID = Intrinsic::x86_avx2_packusdw;
1771 else if (VecWidth == 512)
1772 IID = Intrinsic::x86_avx512_packusdw_512;
1773 else
1774      llvm_unreachable("Unexpected intrinsic");
1775 } else if (Name.startswith("vpermilvar.")) {
1776 if (VecWidth == 128 && EltWidth == 32)
1777 IID = Intrinsic::x86_avx_vpermilvar_ps;
1778 else if (VecWidth == 128 && EltWidth == 64)
1779 IID = Intrinsic::x86_avx_vpermilvar_pd;
1780 else if (VecWidth == 256 && EltWidth == 32)
1781 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
1782 else if (VecWidth == 256 && EltWidth == 64)
1783 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
1784 else if (VecWidth == 512 && EltWidth == 32)
1785 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
1786 else if (VecWidth == 512 && EltWidth == 64)
1787 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
1788 else
1789      llvm_unreachable("Unexpected intrinsic");
1790 } else if (Name == "cvtpd2dq.256") {
1791 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
1792 } else if (Name == "cvtpd2ps.256") {
1793 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
1794 } else if (Name == "cvttpd2dq.256") {
1795 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
1796 } else if (Name == "cvttps2dq.128") {
1797 IID = Intrinsic::x86_sse2_cvttps2dq;
1798 } else if (Name == "cvttps2dq.256") {
1799 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
1800 } else if (Name.startswith("permvar.")) {
1801 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
1802 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1803 IID = Intrinsic::x86_avx2_permps;
1804 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1805 IID = Intrinsic::x86_avx2_permd;
1806 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1807 IID = Intrinsic::x86_avx512_permvar_df_256;
1808 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1809 IID = Intrinsic::x86_avx512_permvar_di_256;
1810 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1811 IID = Intrinsic::x86_avx512_permvar_sf_512;
1812 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1813 IID = Intrinsic::x86_avx512_permvar_si_512;
1814 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1815 IID = Intrinsic::x86_avx512_permvar_df_512;
1816 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1817 IID = Intrinsic::x86_avx512_permvar_di_512;
1818 else if (VecWidth == 128 && EltWidth == 16)
1819 IID = Intrinsic::x86_avx512_permvar_hi_128;
1820 else if (VecWidth == 256 && EltWidth == 16)
1821 IID = Intrinsic::x86_avx512_permvar_hi_256;
1822 else if (VecWidth == 512 && EltWidth == 16)
1823 IID = Intrinsic::x86_avx512_permvar_hi_512;
1824 else if (VecWidth == 128 && EltWidth == 8)
1825 IID = Intrinsic::x86_avx512_permvar_qi_128;
1826 else if (VecWidth == 256 && EltWidth == 8)
1827 IID = Intrinsic::x86_avx512_permvar_qi_256;
1828 else if (VecWidth == 512 && EltWidth == 8)
1829 IID = Intrinsic::x86_avx512_permvar_qi_512;
1830 else
1831      llvm_unreachable("Unexpected intrinsic");
1832 } else if (Name.startswith("dbpsadbw.")) {
1833 if (VecWidth == 128)
1834 IID = Intrinsic::x86_avx512_dbpsadbw_128;
1835 else if (VecWidth == 256)
1836 IID = Intrinsic::x86_avx512_dbpsadbw_256;
1837 else if (VecWidth == 512)
1838 IID = Intrinsic::x86_avx512_dbpsadbw_512;
1839 else
1840      llvm_unreachable("Unexpected intrinsic");
1841 } else if (Name.startswith("pmultishift.qb.")) {
1842 if (VecWidth == 128)
1843 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
1844 else if (VecWidth == 256)
1845 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
1846 else if (VecWidth == 512)
1847 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
1848 else
1849      llvm_unreachable("Unexpected intrinsic");
1850 } else if (Name.startswith("conflict.")) {
1851 if (Name[9] == 'd' && VecWidth == 128)
1852 IID = Intrinsic::x86_avx512_conflict_d_128;
1853 else if (Name[9] == 'd' && VecWidth == 256)
1854 IID = Intrinsic::x86_avx512_conflict_d_256;
1855 else if (Name[9] == 'd' && VecWidth == 512)
1856 IID = Intrinsic::x86_avx512_conflict_d_512;
1857 else if (Name[9] == 'q' && VecWidth == 128)
1858 IID = Intrinsic::x86_avx512_conflict_q_128;
1859 else if (Name[9] == 'q' && VecWidth == 256)
1860 IID = Intrinsic::x86_avx512_conflict_q_256;
1861 else if (Name[9] == 'q' && VecWidth == 512)
1862 IID = Intrinsic::x86_avx512_conflict_q_512;
1863 else
1864      llvm_unreachable("Unexpected intrinsic");
1865 } else if (Name.startswith("pavg.")) {
1866 if (Name[5] == 'b' && VecWidth == 128)
1867 IID = Intrinsic::x86_sse2_pavg_b;
1868 else if (Name[5] == 'b' && VecWidth == 256)
1869 IID = Intrinsic::x86_avx2_pavg_b;
1870 else if (Name[5] == 'b' && VecWidth == 512)
1871 IID = Intrinsic::x86_avx512_pavg_b_512;
1872 else if (Name[5] == 'w' && VecWidth == 128)
1873 IID = Intrinsic::x86_sse2_pavg_w;
1874 else if (Name[5] == 'w' && VecWidth == 256)
1875 IID = Intrinsic::x86_avx2_pavg_w;
1876 else if (Name[5] == 'w' && VecWidth == 512)
1877 IID = Intrinsic::x86_avx512_pavg_w_512;
1878 else
1879      llvm_unreachable("Unexpected intrinsic");
1880 } else
1881 return false;
1882
1883 SmallVector<Value *, 4> Args(CI.args());
1884 Args.pop_back();
1885 Args.pop_back();
1886 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1887 Args);
1888 unsigned NumArgs = CI.arg_size();
1889 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
1890 CI.getArgOperand(NumArgs - 2));
1891 return true;
1892}
1893
1894/// Upgrade the comment in a call to inline asm that represents an objc
1895/// retain/release marker.
1896void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
1897 size_t Pos;
1898 if (AsmStr->find("mov\tfp") == 0 &&
1899 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
1900 (Pos = AsmStr->find("# marker")) != std::string::npos) {
1901 AsmStr->replace(Pos, 1, ";");
1902 }
1903}
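
A standalone sketch of the string rewrite; the asm text below is made up, only the matched substrings matter:

#include <cassert>
#include <string>

int main() {
  // Hypothetical marker string satisfying all three conditions above.
  std::string S =
      "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue";
  size_t Pos = S.find("# marker");
  if (Pos != std::string::npos)
    S.replace(Pos, 1, ";"); // replace only the '#', keeping " marker..."
  assert(S.find("; marker") != std::string::npos);
}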
1904
1905static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
1906 IRBuilder<> &Builder) {
1907 if (Name == "mve.vctp64.old") {
1908 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
1909 // correct type.
1910 Value *VCTP = Builder.CreateCall(
1911 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
1912 CI->getArgOperand(0), CI->getName());
1913 Value *C1 = Builder.CreateCall(
1914 Intrinsic::getDeclaration(
1915 F->getParent(), Intrinsic::arm_mve_pred_v2i,
1916 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
1917 VCTP);
1918 return Builder.CreateCall(
1919 Intrinsic::getDeclaration(
1920 F->getParent(), Intrinsic::arm_mve_pred_i2v,
1921 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
1922 C1);
1923 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
1924 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
1925 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
1926 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
1927 Name == "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
1928 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
1929 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
1930 Name == "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
1931 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
1932 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
1933 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
1934 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
1935 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
1936 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
1937 std::vector<Type *> Tys;
1938 unsigned ID = CI->getIntrinsicID();
1939 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
1940 switch (ID) {
1941 case Intrinsic::arm_mve_mull_int_predicated:
1942 case Intrinsic::arm_mve_vqdmull_predicated:
1943 case Intrinsic::arm_mve_vldr_gather_base_predicated:
1944 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
1945 break;
1946 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
1947 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
1948 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
1949 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
1950 V2I1Ty};
1951 break;
1952 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
1953 Tys = {CI->getType(), CI->getOperand(0)->getType(),
1954 CI->getOperand(1)->getType(), V2I1Ty};
1955 break;
1956 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
1957 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
1958 CI->getOperand(2)->getType(), V2I1Ty};
1959 break;
1960 case Intrinsic::arm_cde_vcx1q_predicated:
1961 case Intrinsic::arm_cde_vcx1qa_predicated:
1962 case Intrinsic::arm_cde_vcx2q_predicated:
1963 case Intrinsic::arm_cde_vcx2qa_predicated:
1964 case Intrinsic::arm_cde_vcx3q_predicated:
1965 case Intrinsic::arm_cde_vcx3qa_predicated:
1966 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
1967 break;
1968 default:
1969      llvm_unreachable("Unhandled Intrinsic!");
1970 }
1971
1972 std::vector<Value *> Ops;
1973 for (Value *Op : CI->args()) {
1974 Type *Ty = Op->getType();
1975 if (Ty->getScalarSizeInBits() == 1) {
1976 Value *C1 = Builder.CreateCall(
1977 Intrinsic::getDeclaration(
1978 F->getParent(), Intrinsic::arm_mve_pred_v2i,
1979 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
1980 Op);
1981 Op = Builder.CreateCall(
1982 Intrinsic::getDeclaration(F->getParent(),
1983 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
1984 C1);
1985 }
1986 Ops.push_back(Op);
1987 }
1988
1989 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
1990 return Builder.CreateCall(Fn, Ops, CI->getName());
1991 }
1992  llvm_unreachable("Unknown function for ARM CallBase upgrade.");
1993}
1994
1995/// Upgrade a call to an old intrinsic. All argument and return casting must be
1996/// provided to seamlessly integrate with the existing context.
1997void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
1998 Function *F = CI->getCalledFunction();
1999 LLVMContext &C = CI->getContext();
2000 IRBuilder<> Builder(C);
2001 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
2002
2003  assert(F && "Intrinsic call is not direct?");
2004
2005 if (!NewFn) {
2006 // Get the Function's name.
2007 StringRef Name = F->getName();
2008
2009    assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
2010 Name = Name.substr(5);
2011
2012 bool IsX86 = Name.startswith("x86.");
2013 if (IsX86)
2014 Name = Name.substr(4);
2015 bool IsNVVM = Name.startswith("nvvm.");
2016 if (IsNVVM)
2017 Name = Name.substr(5);
2018 bool IsARM = Name.startswith("arm.");
2019 if (IsARM)
2020 Name = Name.substr(4);
2021
2022 if (IsX86 && Name.startswith("sse4a.movnt.")) {
2023 Module *M = F->getParent();
2024 SmallVector<Metadata *, 1> Elts;
2025 Elts.push_back(
2026 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2027 MDNode *Node = MDNode::get(C, Elts);
2028
2029 Value *Arg0 = CI->getArgOperand(0);
2030 Value *Arg1 = CI->getArgOperand(1);
2031
2032 // Nontemporal (unaligned) store of the 0'th element of the float/double
2033 // vector.
2034 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
2035 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
2036 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
2037 Value *Extract =
2038 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
2039
2040 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
2041 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2042
2043 // Remove intrinsic.
2044 CI->eraseFromParent();
2045 return;
2046 }
2047
2048 if (IsX86 && (Name.startswith("avx.movnt.") ||
2049 Name.startswith("avx512.storent."))) {
2050 Module *M = F->getParent();
2051 SmallVector<Metadata *, 1> Elts;
2052 Elts.push_back(
2053 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2054 MDNode *Node = MDNode::get(C, Elts);
2055
2056 Value *Arg0 = CI->getArgOperand(0);
2057 Value *Arg1 = CI->getArgOperand(1);
2058
2059      // Convert the pointer to a pointer to the stored type.
2060 Value *BC = Builder.CreateBitCast(Arg0,
2061 PointerType::getUnqual(Arg1->getType()),
2062 "cast");
2063 StoreInst *SI = Builder.CreateAlignedStore(
2064 Arg1, BC,
2065 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
2066 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2067
2068 // Remove intrinsic.
2069 CI->eraseFromParent();
2070 return;
2071 }
2072
2073 if (IsX86 && Name == "sse2.storel.dq") {
2074 Value *Arg0 = CI->getArgOperand(0);
2075 Value *Arg1 = CI->getArgOperand(1);
2076
2077 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2078 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2079 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2080 Value *BC = Builder.CreateBitCast(Arg0,
2081 PointerType::getUnqual(Elt->getType()),
2082 "cast");
2083 Builder.CreateAlignedStore(Elt, BC, Align(1));
2084
2085 // Remove intrinsic.
2086 CI->eraseFromParent();
2087 return;
2088 }
2089
2090 if (IsX86 && (Name.startswith("sse.storeu.") ||
2091 Name.startswith("sse2.storeu.") ||
2092 Name.startswith("avx.storeu."))) {
2093 Value *Arg0 = CI->getArgOperand(0);
2094 Value *Arg1 = CI->getArgOperand(1);
2095
2096 Arg0 = Builder.CreateBitCast(Arg0,
2097 PointerType::getUnqual(Arg1->getType()),
2098 "cast");
2099 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2100
2101 // Remove intrinsic.
2102 CI->eraseFromParent();
2103 return;
2104 }
2105
2106 if (IsX86 && Name == "avx512.mask.store.ss") {
2107 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2108 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2109 Mask, false);
2110
2111 // Remove intrinsic.
2112 CI->eraseFromParent();
2113 return;
2114 }
2115
2116 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
2117 // "avx512.mask.storeu." or "avx512.mask.store."
2118 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2119 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2120 CI->getArgOperand(2), Aligned);
2121
2122 // Remove intrinsic.
2123 CI->eraseFromParent();
2124 return;
2125 }
2126
2127 Value *Rep;
2128 // Upgrade packed integer vector compare intrinsics to compare instructions.
2129 if (IsX86 && (Name.startswith("sse2.pcmp") ||
2130 Name.startswith("avx2.pcmp"))) {
2131    // "sse2.pcmpeq.", "sse2.pcmpgt.", "avx2.pcmpeq.", or "avx2.pcmpgt."
2132 bool CmpEq = Name[9] == 'e';
2133 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2134 CI->getArgOperand(0), CI->getArgOperand(1));
2135 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2136 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
2137 Type *ExtTy = Type::getInt32Ty(C);
2138 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2139 ExtTy = Type::getInt64Ty(C);
2140 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2141 ExtTy->getPrimitiveSizeInBits();
2142 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2143 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2144 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2145 Name == "sse2.sqrt.sd")) {
2146 Value *Vec = CI->getArgOperand(0);
2147 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2148 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2149 Intrinsic::sqrt, Elt0->getType());
2150 Elt0 = Builder.CreateCall(Intr, Elt0);
2151 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2152 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
2153 Name.startswith("sse2.sqrt.p") ||
2154 Name.startswith("sse.sqrt.p"))) {
2155 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2156 Intrinsic::sqrt,
2157 CI->getType()),
2158 {CI->getArgOperand(0)});
2159 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
2160 if (CI->arg_size() == 4 &&
2161 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2162 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2163 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2164 : Intrinsic::x86_avx512_sqrt_pd_512;
2165
2166 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2167 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
2168 IID), Args);
2169 } else {
2170 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2171 Intrinsic::sqrt,
2172 CI->getType()),
2173 {CI->getArgOperand(0)});
2174 }
2175 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2176 CI->getArgOperand(1));
2177 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
2178 Name.startswith("avx512.ptestnm"))) {
2179 Value *Op0 = CI->getArgOperand(0);
2180 Value *Op1 = CI->getArgOperand(1);
2181 Value *Mask = CI->getArgOperand(2);
2182 Rep = Builder.CreateAnd(Op0, Op1);
2183 llvm::Type *Ty = Op0->getType();
2184 Value *Zero = llvm::Constant::getNullValue(Ty);
2185 ICmpInst::Predicate Pred =
2186 Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2187 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2188 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
2189 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
2190 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2191 ->getNumElements();
2192 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2193 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2194 CI->getArgOperand(1));
2195 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
2196 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2197 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2198 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2199 int Indices[64];
2200 for (unsigned i = 0; i != NumElts; ++i)
2201 Indices[i] = i;
2202
2203 // First extract half of each vector. This gives better codegen than
2204 // doing it in a single shuffle.
2205 LHS = Builder.CreateShuffleVector(LHS, LHS,
2206 makeArrayRef(Indices, NumElts / 2));
2207 RHS = Builder.CreateShuffleVector(RHS, RHS,
2208 makeArrayRef(Indices, NumElts / 2));
2209 // Concat the vectors.
2210 // NOTE: Operands have to be swapped to match intrinsic definition.
2211 Rep = Builder.CreateShuffleVector(RHS, LHS,
2212 makeArrayRef(Indices, NumElts));
2213 Rep = Builder.CreateBitCast(Rep, CI->getType());
2214 } else if (IsX86 && Name == "avx512.kand.w") {
2215 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2216 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2217 Rep = Builder.CreateAnd(LHS, RHS);
2218 Rep = Builder.CreateBitCast(Rep, CI->getType());
2219 } else if (IsX86 && Name == "avx512.kandn.w") {
2220 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2221 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2222 LHS = Builder.CreateNot(LHS);
2223 Rep = Builder.CreateAnd(LHS, RHS);
2224 Rep = Builder.CreateBitCast(Rep, CI->getType());
2225 } else if (IsX86 && Name == "avx512.kor.w") {
2226 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2227 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2228 Rep = Builder.CreateOr(LHS, RHS);
2229 Rep = Builder.CreateBitCast(Rep, CI->getType());
2230 } else if (IsX86 && Name == "avx512.kxor.w") {
2231 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2232 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2233 Rep = Builder.CreateXor(LHS, RHS);
2234 Rep = Builder.CreateBitCast(Rep, CI->getType());
2235 } else if (IsX86 && Name == "avx512.kxnor.w") {
2236 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2237 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2238 LHS = Builder.CreateNot(LHS);
2239 Rep = Builder.CreateXor(LHS, RHS);
2240 Rep = Builder.CreateBitCast(Rep, CI->getType());
2241 } else if (IsX86 && Name == "avx512.knot.w") {
2242 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2243 Rep = Builder.CreateNot(Rep);
2244 Rep = Builder.CreateBitCast(Rep, CI->getType());
2245 } else if (IsX86 &&
2246 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2247 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2248 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2249 Rep = Builder.CreateOr(LHS, RHS);
2250 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2251 Value *C;
2252 if (Name[14] == 'c')
2253 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2254 else
2255 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2256 Rep = Builder.CreateICmpEQ(Rep, C);
2257 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2258 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2259 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2260 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2261 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2262 Type *I32Ty = Type::getInt32Ty(C);
2263 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2264 ConstantInt::get(I32Ty, 0));
2265 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2266 ConstantInt::get(I32Ty, 0));
2267 Value *EltOp;
2268 if (Name.contains(".add."))
2269 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2270 else if (Name.contains(".sub."))
2271 EltOp = Builder.CreateFSub(Elt0, Elt1);
2272 else if (Name.contains(".mul."))
2273 EltOp = Builder.CreateFMul(Elt0, Elt1);
2274 else
2275 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2276 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2277 ConstantInt::get(I32Ty, 0));
2278 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
2279 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2280 bool CmpEq = Name[16] == 'e';
2281 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2282 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
2283 Type *OpTy = CI->getArgOperand(0)->getType();
2284 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2285 Intrinsic::ID IID;
2286 switch (VecWidth) {
2287    default: llvm_unreachable("Unexpected intrinsic");
2288 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2289 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2290 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2291 }
2292
2293 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2294 { CI->getOperand(0), CI->getArgOperand(1) });
2295 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2296 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
2297 Type *OpTy = CI->getArgOperand(0)->getType();
2298 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2299 unsigned EltWidth = OpTy->getScalarSizeInBits();
2300 Intrinsic::ID IID;
2301 if (VecWidth == 128 && EltWidth == 32)
2302 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2303 else if (VecWidth == 256 && EltWidth == 32)
2304 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2305 else if (VecWidth == 512 && EltWidth == 32)
2306 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2307 else if (VecWidth == 128 && EltWidth == 64)
2308 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2309 else if (VecWidth == 256 && EltWidth == 64)
2310 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2311 else if (VecWidth == 512 && EltWidth == 64)
2312 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2313 else
2314      llvm_unreachable("Unexpected intrinsic");
2315
2316 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2317 { CI->getOperand(0), CI->getArgOperand(1) });
2318 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2319 } else if (IsX86 && Name.startswith("avx512.cmp.p")) {
2320 SmallVector<Value *, 4> Args(CI->args());
2321 Type *OpTy = Args[0]->getType();
2322 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2323 unsigned EltWidth = OpTy->getScalarSizeInBits();
2324 Intrinsic::ID IID;
2325 if (VecWidth == 128 && EltWidth == 32)
2326 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2327 else if (VecWidth == 256 && EltWidth == 32)
2328 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2329 else if (VecWidth == 512 && EltWidth == 32)
2330 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2331 else if (VecWidth == 128 && EltWidth == 64)
2332 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2333 else if (VecWidth == 256 && EltWidth == 64)
2334 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2335 else if (VecWidth == 512 && EltWidth == 64)
2336 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2337 else
2338      llvm_unreachable("Unexpected intrinsic");
2339
2340 Value *Mask = Constant::getAllOnesValue(CI->getType());
2341 if (VecWidth == 512)
2342 std::swap(Mask, Args.back());
2343 Args.push_back(Mask);
2344
2345 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2346 Args);
2347 } else if (IsX86 && Name.startswith("avx512.mask.cmp.")) {
2348 // Integer compare intrinsics.
2349 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2350 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2351 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
2352 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2353 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2354 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
2355 Name.startswith("avx512.cvtw2mask.") ||
2356 Name.startswith("avx512.cvtd2mask.") ||
2357 Name.startswith("avx512.cvtq2mask."))) {
2358 Value *Op = CI->getArgOperand(0);
2359 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2360 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2361 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2362 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2363 Name == "ssse3.pabs.w.128" ||
2364 Name == "ssse3.pabs.d.128" ||
2365 Name.startswith("avx2.pabs") ||
2366 Name.startswith("avx512.mask.pabs"))) {
2367 Rep = upgradeAbs(Builder, *CI);
2368 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2369 Name == "sse2.pmaxs.w" ||
2370 Name == "sse41.pmaxsd" ||
2371 Name.startswith("avx2.pmaxs") ||
2372 Name.startswith("avx512.mask.pmaxs"))) {
2373 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2374 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2375 Name == "sse41.pmaxuw" ||
2376 Name == "sse41.pmaxud" ||
2377 Name.startswith("avx2.pmaxu") ||
2378 Name.startswith("avx512.mask.pmaxu"))) {
2379 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2380 } else if (IsX86 && (Name == "sse41.pminsb" ||
2381 Name == "sse2.pmins.w" ||
2382 Name == "sse41.pminsd" ||
2383 Name.startswith("avx2.pmins") ||
2384 Name.startswith("avx512.mask.pmins"))) {
2385 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2386 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2387 Name == "sse41.pminuw" ||
2388 Name == "sse41.pminud" ||
2389 Name.startswith("avx2.pminu") ||
2390 Name.startswith("avx512.mask.pminu"))) {
2391 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2392 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2393 Name == "avx2.pmulu.dq" ||
2394 Name == "avx512.pmulu.dq.512" ||
2395 Name.startswith("avx512.mask.pmulu.dq."))) {
2396 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2397 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2398 Name == "avx2.pmul.dq" ||
2399 Name == "avx512.pmul.dq.512" ||
2400 Name.startswith("avx512.mask.pmul.dq."))) {
2401 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2402 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2403 Name == "sse2.cvtsi2sd" ||
2404 Name == "sse.cvtsi642ss" ||
2405 Name == "sse2.cvtsi642sd")) {
2406 Rep = Builder.CreateSIToFP(
2407 CI->getArgOperand(1),
2408 cast<VectorType>(CI->getType())->getElementType());
2409 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2410 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2411 Rep = Builder.CreateUIToFP(
2412 CI->getArgOperand(1),
2413 cast<VectorType>(CI->getType())->getElementType());
2414 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2415 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2416 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2417 Rep = Builder.CreateFPExt(
2418 Rep, cast<VectorType>(CI->getType())->getElementType());
2419 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2420 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2421 Name == "sse2.cvtdq2ps" ||
2422 Name == "avx.cvtdq2.pd.256" ||
2423 Name == "avx.cvtdq2.ps.256" ||
2424 Name.startswith("avx512.mask.cvtdq2pd.") ||
2425 Name.startswith("avx512.mask.cvtudq2pd.") ||
2426 Name.startswith("avx512.mask.cvtdq2ps.") ||
2427 Name.startswith("avx512.mask.cvtudq2ps.") ||
2428 Name.startswith("avx512.mask.cvtqq2pd.") ||
2429 Name.startswith("avx512.mask.cvtuqq2pd.") ||
2430 Name == "avx512.mask.cvtqq2ps.256" ||
2431 Name == "avx512.mask.cvtqq2ps.512" ||
2432 Name == "avx512.mask.cvtuqq2ps.256" ||
2433 Name == "avx512.mask.cvtuqq2ps.512" ||
2434 Name == "sse2.cvtps2pd" ||
2435 Name == "avx.cvt.ps2.pd.256" ||
2436 Name == "avx512.mask.cvtps2pd.128" ||
2437 Name == "avx512.mask.cvtps2pd.256")) {
2438 auto *DstTy = cast<FixedVectorType>(CI->getType());
2439 Rep = CI->getArgOperand(0);
2440 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2441
2442 unsigned NumDstElts = DstTy->getNumElements();
2443 if (NumDstElts < SrcTy->getNumElements()) {
2444      assert(NumDstElts == 2 && "Unexpected vector size");
2445 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2446 }
2447
2448 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2449 bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
2450 if (IsPS2PD)
2451 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2452 else if (CI->arg_size() == 4 &&
2453 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2454 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2455 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2456 : Intrinsic::x86_avx512_sitofp_round;
2457 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2458 { DstTy, SrcTy });
2459 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2460 } else {
2461 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2462 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2463 }
2464
2465 if (CI->arg_size() >= 3)
2466 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2467 CI->getArgOperand(1));
2468 } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
2469 Name.startswith("vcvtph2ps."))) {
2470 auto *DstTy = cast<FixedVectorType>(CI->getType());
2471 Rep = CI->getArgOperand(0);
2472 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2473 unsigned NumDstElts = DstTy->getNumElements();
2474 if (NumDstElts != SrcTy->getNumElements()) {
2475      assert(NumDstElts == 4 && "Unexpected vector size");
2476 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2477 }
2478 Rep = Builder.CreateBitCast(
2479 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2480 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2481 if (CI->arg_size() >= 3)
2482 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2483 CI->getArgOperand(1));
2484 } else if (IsX86 && Name.startswith("avx512.mask.load")) {
2485 // "avx512.mask.loadu." or "avx512.mask.load."
2486 bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
2487 Rep =
2488 UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2489 CI->getArgOperand(2), Aligned);
2490 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
2491 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2492 Type *PtrTy = ResultTy->getElementType();
2493
2494 // Cast the pointer to element type.
2495 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2496 llvm::PointerType::getUnqual(PtrTy));
2497
2498 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2499 ResultTy->getNumElements());
2500
2501 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2502 Intrinsic::masked_expandload,
2503 ResultTy);
2504 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2505 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
2506 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2507 Type *PtrTy = ResultTy->getElementType();
2508
2509 // Cast the pointer to element type.
2510 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2511 llvm::PointerType::getUnqual(PtrTy));
2512
2513 Value *MaskVec =
2514 getX86MaskVec(Builder, CI->getArgOperand(2),
2515 cast<FixedVectorType>(ResultTy)->getNumElements());
2516
2517 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2518 Intrinsic::masked_compressstore,
2519 ResultTy);
2520 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2521 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
2522 Name.startswith("avx512.mask.expand."))) {
2523 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2524
2525 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2526 ResultTy->getNumElements());
2527
2528 bool IsCompress = Name[12] == 'c';
2529 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2530 : Intrinsic::x86_avx512_mask_expand;
2531 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2532 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2533 MaskVec });
2534 } else if (IsX86 && Name.startswith("xop.vpcom")) {
2535 bool IsSigned;
2536 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") ||
2537 Name.endswith("uq"))
2538 IsSigned = false;
2539 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") ||
2540 Name.endswith("q"))
2541 IsSigned = true;
2542 else
2543 llvm_unreachable("Unknown suffix");
2544
2545 unsigned Imm;
2546 if (CI->arg_size() == 3) {
2547 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2548 } else {
2549 Name = Name.substr(9); // strip off "xop.vpcom"
2550 if (Name.startswith("lt"))
2551 Imm = 0;
2552 else if (Name.startswith("le"))
2553 Imm = 1;
2554 else if (Name.startswith("gt"))
2555 Imm = 2;
2556 else if (Name.startswith("ge"))
2557 Imm = 3;
2558 else if (Name.startswith("eq"))
2559 Imm = 4;
2560 else if (Name.startswith("ne"))
2561 Imm = 5;
2562 else if (Name.startswith("false"))
2563 Imm = 6;
2564 else if (Name.startswith("true"))
2565 Imm = 7;
2566 else
2567 llvm_unreachable("Unknown condition");
2568 }
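// Worked example: for "xop.vpcomltub" the "ub" suffix gives IsSigned = false,
// and after stripping "xop.vpcom" the "lt" prefix gives Imm = 0, i.e. an
// unsigned less-than comparison.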
2569
2570 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2571 } else if (IsX86 && Name.startswith("xop.vpcmov")) {
2572 Value *Sel = CI->getArgOperand(2);
2573 Value *NotSel = Builder.CreateNot(Sel);
2574 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
2575 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
2576 Rep = Builder.CreateOr(Sel0, Sel1);
2577 } else if (IsX86 && (Name.startswith("xop.vprot") ||
2578 Name.startswith("avx512.prol") ||
2579 Name.startswith("avx512.mask.prol"))) {
2580 Rep = upgradeX86Rotate(Builder, *CI, false);
2581 } else if (IsX86 && (Name.startswith("avx512.pror") ||
2582 Name.startswith("avx512.mask.pror"))) {
2583 Rep = upgradeX86Rotate(Builder, *CI, true);
2584 } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
2585 Name.startswith("avx512.mask.vpshld") ||
2586 Name.startswith("avx512.maskz.vpshld"))) {
2587 bool ZeroMask = Name[11] == 'z';
2588 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
2589 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
2590 Name.startswith("avx512.mask.vpshrd") ||
2591 Name.startswith("avx512.maskz.vpshrd"))) {
2592 bool ZeroMask = Name[11] == 'z';
2593 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
2594 } else if (IsX86 && Name == "sse42.crc32.64.8") {
2595 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
2596 Intrinsic::x86_sse42_crc32_32_8);
2597 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
2598 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
2599 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
2600 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
2601 Name.startswith("avx512.vbroadcast.s"))) {
2602 // Replace broadcasts with a series of insertelements.
2603 auto *VecTy = cast<FixedVectorType>(CI->getType());
2604 Type *EltTy = VecTy->getElementType();
2605 unsigned EltNum = VecTy->getNumElements();
2606 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
2607 EltTy->getPointerTo());
2608 Value *Load = Builder.CreateLoad(EltTy, Cast);
2609 Type *I32Ty = Type::getInt32Ty(C);
2610 Rep = PoisonValue::get(VecTy);
2611 for (unsigned I = 0; I < EltNum; ++I)
2612 Rep = Builder.CreateInsertElement(Rep, Load,
2613 ConstantInt::get(I32Ty, I));
2614 } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
2615 Name.startswith("sse41.pmovzx") ||
2616 Name.startswith("avx2.pmovsx") ||
2617 Name.startswith("avx2.pmovzx") ||
2618 Name.startswith("avx512.mask.pmovsx") ||
2619 Name.startswith("avx512.mask.pmovzx"))) {
2620 auto *DstTy = cast<FixedVectorType>(CI->getType());
2621 unsigned NumDstElts = DstTy->getNumElements();
2622
2623 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
2624 SmallVector<int, 8> ShuffleMask(NumDstElts);
2625 for (unsigned i = 0; i != NumDstElts; ++i)
2626 ShuffleMask[i] = i;
2627
2628 Value *SV =
2629 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
2630
2631 bool DoSext = (StringRef::npos != Name.find("pmovsx"));
2632 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
2633 : Builder.CreateZExt(SV, DstTy);
2634 // If there are 3 arguments, it's a masked intrinsic so we need a select.
2635 if (CI->arg_size() == 3)
2636 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2637 CI->getArgOperand(1));
2638 } else if (Name == "avx512.mask.pmov.qd.256" ||
2639 Name == "avx512.mask.pmov.qd.512" ||
2640 Name == "avx512.mask.pmov.wb.256" ||
2641 Name == "avx512.mask.pmov.wb.512") {
2642 Type *Ty = CI->getArgOperand(1)->getType();
2643 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
2644 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2645 CI->getArgOperand(1));
2646 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
2647 Name == "avx2.vbroadcasti128")) {
2648 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
2649 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
2650 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
2651 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
2652 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
2653 PointerType::getUnqual(VT));
2654 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
2655 if (NumSrcElts == 2)
2656 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
2657 else
2658 Rep = Builder.CreateShuffleVector(
2659 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
2660 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
2661 Name.startswith("avx512.mask.shuf.f"))) {
2662 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2663 Type *VT = CI->getType();
2664 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
2665 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
2666 unsigned ControlBitsMask = NumLanes - 1;
2667 unsigned NumControlBits = NumLanes / 2;
2668 SmallVector<int, 8> ShuffleMask(0);
2669
2670 for (unsigned l = 0; l != NumLanes; ++l) {
2671 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
2672 // We actually need the other source.
2673 if (l >= NumLanes / 2)
2674 LaneMask += NumLanes;
2675 for (unsigned i = 0; i != NumElementsInLane; ++i)
2676 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
2677 }
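// Worked example (illustrative): "avx512.mask.shuf.i32x4" on <16 x i32> has
// NumLanes = 4; Imm = 0x4E (0b01001110) picks lanes 2,3 of the first source
// and lanes 0,1 of the second: ShuffleMask = {8..11, 12..15, 16..19, 20..23}.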
2678 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2679 CI->getArgOperand(1), ShuffleMask);
2680 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2681 CI->getArgOperand(3));
2682 } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
2683 Name.startswith("avx512.mask.broadcasti"))) {
2684 unsigned NumSrcElts =
2685 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
2686 ->getNumElements();
2687 unsigned NumDstElts =
2688 cast<FixedVectorType>(CI->getType())->getNumElements();
2689
2690 SmallVector<int, 8> ShuffleMask(NumDstElts);
2691 for (unsigned i = 0; i != NumDstElts; ++i)
2692 ShuffleMask[i] = i % NumSrcElts;
2693
2694 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2695 CI->getArgOperand(0),
2696 ShuffleMask);
2697 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2698 CI->getArgOperand(1));
2699 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
2700 Name.startswith("avx2.vbroadcast") ||
2701 Name.startswith("avx512.pbroadcast") ||
2702 Name.startswith("avx512.mask.broadcast.s"))) {
2703 // Replace vp?broadcasts with a vector shuffle.
2704 Value *Op = CI->getArgOperand(0);
2705 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
2706 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
2707 SmallVector<int, 8> M;
2708 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
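// A zeroinitializer mask selects element 0 for every destination lane,
// i.e. the shuffle is a splat of the first element.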
2709 Rep = Builder.CreateShuffleVector(Op, M);
2710
2711 if (CI->arg_size() == 3)
2712 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2713 CI->getArgOperand(1));
2714 } else if (IsX86 && (Name.startswith("sse2.padds.") ||
2715 Name.startswith("avx2.padds.") ||
2716 Name.startswith("avx512.padds.") ||
2717 Name.startswith("avx512.mask.padds."))) {
2718 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
2719 } else if (IsX86 && (Name.startswith("sse2.psubs.") ||
2720 Name.startswith("avx2.psubs.") ||
2721 Name.startswith("avx512.psubs.") ||
2722 Name.startswith("avx512.mask.psubs."))) {
2723 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
2724 } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
2725 Name.startswith("avx2.paddus.") ||
2726 Name.startswith("avx512.mask.paddus."))) {
2727 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
2728 } else if (IsX86 && (Name.startswith("sse2.psubus.") ||
2729 Name.startswith("avx2.psubus.") ||
2730 Name.startswith("avx512.mask.psubus."))) {
2731 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
2732 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
2733 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2734 CI->getArgOperand(1),
2735 CI->getArgOperand(2),
2736 CI->getArgOperand(3),
2737 CI->getArgOperand(4),
2738 false);
2739 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
2740 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2741 CI->getArgOperand(1),
2742 CI->getArgOperand(2),
2743 CI->getArgOperand(3),
2744 CI->getArgOperand(4),
2745 true);
2746 } else if (IsX86 && (Name == "sse2.psll.dq" ||
2747 Name == "avx2.psll.dq")) {
2748 // 128/256-bit shift left specified in bits.
2749 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2750 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
2751 Shift / 8); // Shift is in bits.
2752 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
2753 Name == "avx2.psrl.dq")) {
2754 // 128/256-bit shift right specified in bits.
2755 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2756 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
2757 Shift / 8); // Shift is in bits.
2758 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
2759 Name == "avx2.psll.dq.bs" ||
2760 Name == "avx512.psll.dq.512")) {
2761 // 128/256/512-bit shift left specified in bytes.
2762 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2763 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2764 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
2765 Name == "avx2.psrl.dq.bs" ||
2766 Name == "avx512.psrl.dq.512")) {
2767 // 128/256/512-bit shift right specified in bytes.
2768 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2769 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2770 } else if (IsX86 && (Name == "sse41.pblendw" ||
2771 Name.startswith("sse41.blendp") ||
2772 Name.startswith("avx.blend.p") ||
2773 Name == "avx2.pblendw" ||
2774 Name.startswith("avx2.pblendd."))) {
2775 Value *Op0 = CI->getArgOperand(0);
2776 Value *Op1 = CI->getArgOperand(1);
2777 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2778 auto *VecTy = cast<FixedVectorType>(CI->getType());
2779 unsigned NumElts = VecTy->getNumElements();
2780
2781 SmallVector<int, 16> Idxs(NumElts);
2782 for (unsigned i = 0; i != NumElts; ++i)
2783 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
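// Worked example: "sse41.pblendw" with Imm = 0x0F on <8 x i16> gives
// Idxs = {8,9,10,11,4,5,6,7}: the low four elements come from Op1,
// the rest from Op0.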
2784
2785 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2786 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2787 Name == "avx2.vinserti128" ||
2788 Name.startswith("avx512.mask.insert"))) {
2789 Value *Op0 = CI->getArgOperand(0);
2790 Value *Op1 = CI->getArgOperand(1);
2791 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2792 unsigned DstNumElts =
2793 cast<FixedVectorType>(CI->getType())->getNumElements();
2794 unsigned SrcNumElts =
2795 cast<FixedVectorType>(Op1->getType())->getNumElements();
2796 unsigned Scale = DstNumElts / SrcNumElts;
2797
2798 // Mask off the high bits of the immediate value; hardware ignores those.
2799 Imm = Imm % Scale;
2800
2801 // Extend the second operand into a vector the size of the destination.
2802 SmallVector<int, 8> Idxs(DstNumElts);
2803 for (unsigned i = 0; i != SrcNumElts; ++i)
2804 Idxs[i] = i;
2805 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2806 Idxs[i] = SrcNumElts;
2807 Rep = Builder.CreateShuffleVector(Op1, Idxs);
2808
2809 // Insert the second operand into the first operand.
2810
2811 // Note that there is no guarantee that instruction lowering will actually
2812 // produce a vinsertf128 instruction for the created shuffles. In
2813 // particular, the 0 immediate case involves no lane changes, so it can
2814 // be handled as a blend.
2815
2816 // Example of shuffle mask for 32-bit elements:
2817 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2818 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
2819
2820 // First fill with the identity mask.
2821 for (unsigned i = 0; i != DstNumElts; ++i)
2822 Idxs[i] = i;
2823 // Then replace the elements where we need to insert.
2824 for (unsigned i = 0; i != SrcNumElts; ++i)
2825 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2826 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2827
2828 // If the intrinsic has a mask operand, handle that.
2829 if (CI->arg_size() == 5)
2830 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2831 CI->getArgOperand(3));
2832 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2833 Name == "avx2.vextracti128" ||
2834 Name.startswith("avx512.mask.vextract"))) {
2835 Value *Op0 = CI->getArgOperand(0);
2836 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2837 unsigned DstNumElts =
2838 cast<FixedVectorType>(CI->getType())->getNumElements();
2839 unsigned SrcNumElts =
2840 cast<FixedVectorType>(Op0->getType())->getNumElements();
2841 unsigned Scale = SrcNumElts / DstNumElts;
2842
2843 // Mask off the high bits of the immediate value; hardware ignores those.
2844 Imm = Imm % Scale;
2845
2846 // Get indexes for the subvector of the input vector.
2847 SmallVector<int, 8> Idxs(DstNumElts);
2848 for (unsigned i = 0; i != DstNumElts; ++i) {
2849 Idxs[i] = i + (Imm * DstNumElts);
2850 }
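// Worked example (illustrative): "avx2.vextracti128" extracts <2 x i64>
// from <4 x i64>; with Imm = 1, Idxs = {2,3} (the upper 128-bit half).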
2851 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2852
2853 // If the intrinsic has a mask operand, handle that.
2854 if (CI->arg_size() == 4)
2855 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2856 CI->getArgOperand(2));
2857 } else if (!IsX86 && Name == "stackprotectorcheck") {
2858 Rep = nullptr;
2859 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
2860 Name.startswith("avx512.mask.perm.di."))) {
2861 Value *Op0 = CI->getArgOperand(0);
2862 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2863 auto *VecTy = cast<FixedVectorType>(CI->getType());
2864 unsigned NumElts = VecTy->getNumElements();
2865
2866 SmallVector<int, 8> Idxs(NumElts);
2867 for (unsigned i = 0; i != NumElts; ++i)
2868 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
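// Worked example (illustrative): "avx512.mask.perm.di.256" on <4 x i64> with
// Imm = 0x1B (0b00011011) gives Idxs = {3,2,1,0}; on 512-bit types the same
// 2-bit pattern repeats for each group of four elements.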
2869
2870 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2871
2872 if (CI->arg_size() == 4)
2873 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2874 CI->getArgOperand(2));
2875 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
2876 Name == "avx2.vperm2i128")) {
2877 // The immediate permute control byte looks like this:
2878 // [1:0] - select 128 bits from sources for low half of destination
2879 // [2] - ignore
2880 // [3] - zero low half of destination
2881 // [5:4] - select 128 bits from sources for high half of destination
2882 // [6] - ignore
2883 // [7] - zero high half of destination
2884
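// Worked example: Imm = 0x21 on <8 x float>: V0 = operand 0, V1 = operand 1,
// no zeroing; the low half takes V0's high 128-bit lane and the high half
// takes V1's low lane, so ShuffleMask = {4,5,6,7, 8,9,10,11}.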
2885 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2886
2887 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2888 unsigned HalfSize = NumElts / 2;
2889 SmallVector<int, 8> ShuffleMask(NumElts);
2890
2891 // Determine which operand(s) are actually in use for this instruction.
2892 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2893 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2894
2895 // If needed, replace operands based on zero mask.
2896 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
2897 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
2898
2899 // Permute low half of result.
2900 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
2901 for (unsigned i = 0; i < HalfSize; ++i)
2902 ShuffleMask[i] = StartIndex + i;
2903
2904 // Permute high half of result.
2905 StartIndex = (Imm & 0x10) ? HalfSize : 0;
2906 for (unsigned i = 0; i < HalfSize; ++i)
2907 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
2908
2909 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2910
2911 } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
2912 Name == "sse2.pshuf.d" ||
2913 Name.startswith("avx512.mask.vpermil.p") ||
2914 Name.startswith("avx512.mask.pshuf.d."))) {
2915 Value *Op0 = CI->getArgOperand(0);
2916 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2917 auto *VecTy = cast<FixedVectorType>(CI->getType());
2918 unsigned NumElts = VecTy->getNumElements();
2919 // Calculate the size of each index in the immediate.
2920 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
2921 unsigned IdxMask = ((1 << IdxSize) - 1);
2922
2923 SmallVector<int, 8> Idxs(NumElts);
2924 // Look up the bits for this element, wrapping around the immediate every
2925 // 8 bits. Elements are grouped into sets of 2 or 4 elements, so we need
2926 // to offset by the first index of each group.
2927 for (unsigned i = 0; i != NumElts; ++i)
2928 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
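// Worked example: "sse2.pshuf.d" with Imm = 0x1B (0b00011011) on <4 x i32>:
// IdxSize = 2, IdxMask = 3, Idxs = {3,2,1,0}, a full reversal.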
2929
2930 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2931
2932 if (CI->arg_size() == 4)
2933 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2934 CI->getArgOperand(2));
2935 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
2936 Name.startswith("avx512.mask.pshufl.w."))) {
2937 Value *Op0 = CI->getArgOperand(0);
2938 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2939 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2940
2941 SmallVector<int, 16> Idxs(NumElts);
2942 for (unsigned l = 0; l != NumElts; l += 8) {
2943 for (unsigned i = 0; i != 4; ++i)
2944 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
2945 for (unsigned i = 4; i != 8; ++i)
2946 Idxs[i + l] = i + l;
2947 }
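// Worked example: "sse2.pshufl.w" with Imm = 0x1B on <8 x i16> gives
// Idxs = {3,2,1,0, 4,5,6,7}: the low quadword is reversed, the high
// quadword is passed through.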
2948
2949 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2950
2951 if (CI->arg_size() == 4)
2952 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2953 CI->getArgOperand(2));
2954 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
2955 Name.startswith("avx512.mask.pshufh.w."))) {
2956 Value *Op0 = CI->getArgOperand(0);
2957 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2958 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2959
2960 SmallVector<int, 16> Idxs(NumElts);
2961 for (unsigned l = 0; l != NumElts; l += 8) {
2962 for (unsigned i = 0; i != 4; ++i)
2963 Idxs[i + l] = i + l;
2964 for (unsigned i = 0; i != 4; ++i)
2965 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
2966 }
2967
2968 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2969
2970 if (CI->arg_size() == 4)
2971 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2972 CI->getArgOperand(2));
2973 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
2974 Value *Op0 = CI->getArgOperand(0);
2975 Value *Op1 = CI->getArgOperand(1);
2976 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2977 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2978
2979 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2980 unsigned HalfLaneElts = NumLaneElts / 2;
2981
2982 SmallVector<int, 16> Idxs(NumElts);
2983 for (unsigned i = 0; i != NumElts; ++i) {
2984 // Base index is the starting element of the lane.
2985 Idxs[i] = i - (i % NumLaneElts);
2986 // If we are halfway through the lane, switch to the other source.
2987 if ((i % NumLaneElts) >= HalfLaneElts)
2988 Idxs[i] += NumElts;
2989 // Now select the specific element by adding HalfLaneElts bits from
2990 // the immediate, wrapping around the immediate every 8 bits.
2991 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
2992 }
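// Worked example (illustrative): a shufps-style <4 x float> shuffle with
// Imm = 0xB1 gives Idxs = {1,0,7,6}: elements 1,0 from Op0 and elements
// 3,2 from Op1.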
2993
2994 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2995
2996 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2997 CI->getArgOperand(3));
2998 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
2999 Name.startswith("avx512.mask.movshdup") ||
3000 Name.startswith("avx512.mask.movsldup"))) {
3001 Value *Op0 = CI->getArgOperand(0);
3002 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3003 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3004
3005 unsigned Offset = 0;
3006 if (Name.startswith("avx512.mask.movshdup."))
3007 Offset = 1;
3008
3009 SmallVector<int, 16> Idxs(NumElts);
3010 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
3011 for (unsigned i = 0; i != NumLaneElts; i += 2) {
3012 Idxs[i + l + 0] = i + l + Offset;
3013 Idxs[i + l + 1] = i + l + Offset;
3014 }
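// Worked example: on <4 x float>, movsldup gives Idxs = {0,0,2,2} and
// movshdup (Offset = 1) gives Idxs = {1,1,3,3}; movddup on <2 x double>
// gives Idxs = {0,0}.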
3015
3016 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3017
3018 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3019 CI->getArgOperand(1));
3020 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
3021 Name.startswith("avx512.mask.unpckl."))) {
3022 Value *Op0 = CI->getArgOperand(0);
3023 Value *Op1 = CI->getArgOperand(1);
3024 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3025 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3026
3027 SmallVector<int, 64> Idxs(NumElts);
3028 for (int l = 0; l != NumElts; l += NumLaneElts)
3029 for (int i = 0; i != NumLaneElts; ++i)
3030 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
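// Worked example: punpckldq on <4 x i32> gives Idxs = {0,4,1,5},
// interleaving the low halves of the two sources.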
3031
3032 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3033
3034 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3035 CI->getArgOperand(2));
3036 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
3037 Name.startswith("avx512.mask.unpckh."))) {
3038 Value *Op0 = CI->getArgOperand(0);
3039 Value *Op1 = CI->getArgOperand(1);
3040 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3041 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3042
3043 SmallVector<int, 64> Idxs(NumElts);
3044 for (int l = 0; l != NumElts; l += NumLaneElts)
3045 for (int i = 0; i != NumLaneElts; ++i)
3046 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
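// Worked example: punpckhdq on <4 x i32> gives Idxs = {2,6,3,7},
// interleaving the high halves of the two sources.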
3047
3048 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3049
3050 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3051 CI->getArgOperand(2));
3052 } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
3053 Name.startswith("avx512.mask.pand."))) {
3054 VectorType *FTy = cast<VectorType>(CI->getType());
3055 VectorType *ITy = VectorType::getInteger(FTy);
3056 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3057 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3058 Rep = Builder.CreateBitCast(Rep, FTy);
3059 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3060 CI->getArgOperand(2));
3061 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
3062 Name.startswith("avx512.mask.pandn."))) {
3063 VectorType *FTy = cast<VectorType>(CI->getType());
3064 VectorType *ITy = VectorType::getInteger(FTy);
3065 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
3066 Rep = Builder.CreateAnd(Rep,
3067 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3068 Rep = Builder.CreateBitCast(Rep, FTy);
3069 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3070 CI->getArgOperand(2));
3071 } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
3072 Name.startswith("avx512.mask.por."))) {
3073 VectorType *FTy = cast<VectorType>(CI->getType());
3074 VectorType *ITy = VectorType::getInteger(FTy);
3075 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3076 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3077 Rep = Builder.CreateBitCast(Rep, FTy);
3078 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3079 CI->getArgOperand(2));
3080 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
3081 Name.startswith("avx512.mask.pxor."))) {
3082 VectorType *FTy = cast<VectorType>(CI->getType());
3083 VectorType *ITy = VectorType::getInteger(FTy);
3084 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3085 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3086 Rep = Builder.CreateBitCast(Rep, FTy);
3087 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3088 CI->getArgOperand(2));
3089 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
3090 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3091 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3092 CI->getArgOperand(2));
3093 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
3094 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3095 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3096 CI->getArgOperand(2));
3097 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
3098 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3099 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3100 CI->getArgOperand(2));
3101 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
3102 if (Name.endswith(".512")) {
3103 Intrinsic::ID IID;
3104 if (Name[17] == 's')
3105 IID = Intrinsic::x86_avx512_add_ps_512;
3106 else
3107 IID = Intrinsic::x86_avx512_add_pd_512;
3108
3109 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3110 { CI->getArgOperand(0), CI->getArgOperand(1),
3111 CI->getArgOperand(4) });
3112 } else {
3113 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3114 }
3115 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3116 CI->getArgOperand(2));
3117 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
3118 if (Name.endswith(".512")) {
3119 Intrinsic::ID IID;
3120 if (Name[17] == 's')
3121 IID = Intrinsic::x86_avx512_div_ps_512;
3122 else
3123 IID = Intrinsic::x86_avx512_div_pd_512;
3124
3125 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3126 { CI->getArgOperand(0), CI->getArgOperand(1),
3127 CI->getArgOperand(4) });
3128 } else {
3129 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3130 }
3131 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3132 CI->getArgOperand(2));
3133 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
3134 if (Name.endswith(".512")) {
3135 Intrinsic::ID IID;
3136 if (Name[17] == 's')
3137 IID = Intrinsic::x86_avx512_mul_ps_512;
3138 else
3139 IID = Intrinsic::x86_avx512_mul_pd_512;
3140
3141 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3142 { CI->getArgOperand(0), CI->getArgOperand(1),
3143 CI->getArgOperand(4) });
3144 } else {
3145 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3146 }
3147 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3148 CI->getArgOperand(2));
3149 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
3150 if (Name.endswith(".512")) {
3151 Intrinsic::ID IID;
3152 if (Name[17] == 's')
3153 IID = Intrinsic::x86_avx512_sub_ps_512;
3154 else
3155 IID = Intrinsic::x86_avx512_sub_pd_512;
3156
3157 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3158 { CI->getArgOperand(0), CI->getArgOperand(1),
3159 CI->getArgOperand(4) });
3160 } else {
3161 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3162 }
3163 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3164 CI->getArgOperand(2));
3165 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
3166 Name.startswith("avx512.mask.min.p")) &&
3167 Name.drop_front(18) == ".512") {
3168 bool IsDouble = Name[17] == 'd';
3169 bool IsMin = Name[13] == 'i';
3170 static const Intrinsic::ID MinMaxTbl[2][2] = {
3171 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3172 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3173 };
3174 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3175
3176 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3177 { CI->getArgOperand(0), CI->getArgOperand(1),
3178 CI->getArgOperand(4) });
3179 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3180 CI->getArgOperand(2));
3181 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
3182 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3183 Intrinsic::ctlz,
3184 CI->getType()),
3185 { CI->getArgOperand(0), Builder.getInt1(false) });
3186 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3187 CI->getArgOperand(1));
3188 } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
3189 bool IsImmediate = Name[16] == 'i' ||
3190 (Name.size() > 18 && Name[18] == 'i');
3191 bool IsVariable = Name[16] == 'v';
3192 char Size = Name[16] == '.' ? Name[17] :
3193 Name[17] == '.' ? Name[18] :
3194 Name[18] == '.' ? Name[19] :
3195 Name[20];
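// Name-parsing examples (illustrative): "avx512.mask.psll.d.128" has
// Name[16] == '.', so Size = Name[17] = 'd'; "avx512.mask.psll.di.128" has
// Name[18] == 'i' (immediate form) and Size = 'd'; "avx512.mask.psllv4.si"
// has Name[16] == 'v' (variable form) and Size = Name[19] = 's'.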
3196
3197 Intrinsic::ID IID;
3198 if (IsVariable && Name[17] != '.') {
3199 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3200 IID = Intrinsic::x86_avx2_psllv_q;
3201 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3202 IID = Intrinsic::x86_avx2_psllv_q_256;
3203 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3204 IID = Intrinsic::x86_avx2_psllv_d;
3205 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3206 IID = Intrinsic::x86_avx2_psllv_d_256;
3207 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3208 IID = Intrinsic::x86_avx512_psllv_w_128;
3209 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3210 IID = Intrinsic::x86_avx512_psllv_w_256;
3211 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3212 IID = Intrinsic::x86_avx512_psllv_w_512;
3213 else
3214 llvm_unreachable("Unexpected size");
3215 } else if (Name.endswith(".128")) {
3216 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3217 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3218 : Intrinsic::x86_sse2_psll_d;
3219 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3220 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3221 : Intrinsic::x86_sse2_psll_q;
3222 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3223 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3224 : Intrinsic::x86_sse2_psll_w;
3225 else
3226 llvm_unreachable("Unexpected size");
3227 } else if (Name.endswith(".256")) {
3228 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3229 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3230 : Intrinsic::x86_avx2_psll_d;
3231 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3232 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3233 : Intrinsic::x86_avx2_psll_q;
3234 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3235 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3236 : Intrinsic::x86_avx2_psll_w;
3237 else
3238 llvm_unreachable("Unexpected size");
3239 } else {
3240 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3241 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3242 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3243 Intrinsic::x86_avx512_psll_d_512;
3244 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3245 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3246 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3247 Intrinsic::x86_avx512_psll_q_512;
3248 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3249 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3250 : Intrinsic::x86_avx512_psll_w_512;
3251 else
3252 llvm_unreachable("Unexpected size");
3253 }
3254
3255 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3256 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
3257 bool IsImmediate = Name[16] == 'i' ||
3258 (Name.size() > 18 && Name[18] == 'i');
3259 bool IsVariable = Name[16] == 'v';
3260 char Size = Name[16] == '.' ? Name[17] :
3261 Name[17] == '.' ? Name[18] :
3262 Name[18] == '.' ? Name[19] :
3263 Name[20];
3264
3265 Intrinsic::ID IID;
3266 if (IsVariable && Name[17] != '.') {
3267 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3268 IID = Intrinsic::x86_avx2_psrlv_q;
3269 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3270 IID = Intrinsic::x86_avx2_psrlv_q_256;
3271 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3272 IID = Intrinsic::x86_avx2_psrlv_d;
3273 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3274 IID = Intrinsic::x86_avx2_psrlv_d_256;
3275 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3276 IID = Intrinsic::x86_avx512_psrlv_w_128;
3277 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3278 IID = Intrinsic::x86_avx512_psrlv_w_256;
3279 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3280 IID = Intrinsic::x86_avx512_psrlv_w_512;
3281 else
3282 llvm_unreachable("Unexpected size");
3283 } else if (Name.endswith(".128")) {
3284 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3285 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3286 : Intrinsic::x86_sse2_psrl_d;
3287 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3288 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3289 : Intrinsic::x86_sse2_psrl_q;
3290 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3291 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3292 : Intrinsic::x86_sse2_psrl_w;
3293 else
3294 llvm_unreachable("Unexpected size");
3295 } else if (Name.endswith(".256")) {
3296 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3297 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3298 : Intrinsic::x86_avx2_psrl_d;
3299 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3300 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3301 : Intrinsic::x86_avx2_psrl_q;
3302 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3303 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3304 : Intrinsic::x86_avx2_psrl_w;
3305 else
3306 llvm_unreachable("Unexpected size");
3307 } else {
3308 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
3309 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3310 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3311 Intrinsic::x86_avx512_psrl_d_512;
3312 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
3313 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3314 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3315 Intrinsic::x86_avx512_psrl_q_512;
3316 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
3317 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3318 : Intrinsic::x86_avx512_psrl_w_512;
3319 else
3320 llvm_unreachable("Unexpected size");
3321 }
3322
3323 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3324 } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
3325 bool IsImmediate = Name[16] == 'i' ||
3326 (Name.size() > 18 && Name[18] == 'i');
3327 bool IsVariable = Name[16] == 'v';
3328 char Size = Name[16] == '.' ? Name[17] :
3329 Name[17] == '.' ? Name[18] :
3330 Name[18] == '.' ? Name[19] :
3331 Name[20];
3332
3333 Intrinsic::ID IID;
3334 if (IsVariable && Name[17] != '.') {
3335 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3336 IID = Intrinsic::x86_avx2_psrav_d;
3337 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3338 IID = Intrinsic::x86_avx2_psrav_d_256;
3339 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3340 IID = Intrinsic::x86_avx512_psrav_w_128;
3341 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3342 IID = Intrinsic::x86_avx512_psrav_w_256;
3343 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3344 IID = Intrinsic::x86_avx512_psrav_w_512;
3345 else
3346 llvm_unreachable("Unexpected size");
3347 } else if (Name.endswith(".128")) {
3348 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3349 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3350 : Intrinsic::x86_sse2_psra_d;
3351 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3352 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3353 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3354 Intrinsic::x86_avx512_psra_q_128;
3355 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3356 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3357 : Intrinsic::x86_sse2_psra_w;
3358 else
3359 llvm_unreachable("Unexpected size");
3360 } else if (Name.endswith(".256")) {
3361 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3362 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3363 : Intrinsic::x86_avx2_psra_d;
3364 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3365 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3366 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3367 Intrinsic::x86_avx512_psra_q_256;
3368 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3369 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3370 : Intrinsic::x86_avx2_psra_w;
3371 else
3372 llvm_unreachable("Unexpected size");
3373 } else {
3374 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3375 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3376 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3377 Intrinsic::x86_avx512_psra_d_512;
3378 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
3379 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3380 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3381 Intrinsic::x86_avx512_psra_q_512;
3382 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3383 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3384 : Intrinsic::x86_avx512_psra_w_512;
3385 else
3386 llvm_unreachable("Unexpected size");
3387 }
3388
3389 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3390 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
3391 Rep = upgradeMaskedMove(Builder, *CI);
3392 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
3393 Rep = UpgradeMaskToInt(Builder, *CI);
3394 } else if (IsX86 && Name.endswith(".movntdqa")) {
3395 Module *M = F->getParent();
3396 MDNode *Node = MDNode::get(
3397 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3398
3399 Value *Ptr = CI->getArgOperand(0);
3400
3401 // Convert the type of the pointer to a pointer to the stored type.
3402 Value *BC = Builder.CreateBitCast(
3403 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3404 LoadInst *LI = Builder.CreateAlignedLoad(
3405 CI->getType(), BC,
3406 Align(CI->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
3407 LI->setMetadata(M->getMDKindID("nontemporal"), Node);
3408 Rep = LI;
3409 } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
3410 Name.startswith("fma.vfmsub.") ||
3411 Name.startswith("fma.vfnmadd.") ||
3412 Name.startswith("fma.vfnmsub."))) {
3413 bool NegMul = Name[6] == 'n';
3414 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3415 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
3416
3417 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3418 CI->getArgOperand(2) };
3419
3420 if (IsScalar) {
3421 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3422 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3423 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3424 }
3425
3426 if (NegMul && !IsScalar)
3427 Ops[0] = Builder.CreateFNeg(Ops[0]);
3428 if (NegMul && IsScalar)
3429 Ops[1] = Builder.CreateFNeg(Ops[1]);
3430 if (NegAcc)
3431 Ops[2] = Builder.CreateFNeg(Ops[2]);
3432
3433 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3434 Intrinsic::fma,
3435 Ops[0]->getType()),
3436 Ops);
3437
3438 if (IsScalar)
3439 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3440 (uint64_t)0);
3441 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
3442 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3443 CI->getArgOperand(2) };
3444
3445 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3446 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3447 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3448
3449 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3450 Intrinsic::fma,
3451 Ops[0]->getType()),
3452 Ops);
3453
3454 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3455 Rep, (uint64_t)0);
3456 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
3457 Name.startswith("avx512.maskz.vfmadd.s") ||
3458 Name.startswith("avx512.mask3.vfmadd.s") ||
3459 Name.startswith("avx512.mask3.vfmsub.s") ||
3460 Name.startswith("avx512.mask3.vfnmsub.s"))) {
3461 bool IsMask3 = Name[11] == '3';
3462 bool IsMaskZ = Name[11] == 'z';
3463 // Drop the "avx512.mask." to make it easier.
3464 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3465 bool NegMul = Name[2] == 'n';
3466 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3467
3468 Value *A = CI->getArgOperand(0);
3469 Value *B = CI->getArgOperand(1);
3470 Value *C = CI->getArgOperand(2);
3471
3472 if (NegMul && (IsMask3 || IsMaskZ))
3473 A = Builder.CreateFNeg(A);
3474 if (NegMul && !(IsMask3 || IsMaskZ))
3475 B = Builder.CreateFNeg(B);
3476 if (NegAcc)
3477 C = Builder.CreateFNeg(C);
3478
3479 A = Builder.CreateExtractElement(A, (uint64_t)0);
3480 B = Builder.CreateExtractElement(B, (uint64_t)0);
3481 C = Builder.CreateExtractElement(C, (uint64_t)0);
3482
3483 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3484 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3485 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3486
3487 Intrinsic::ID IID;
3488 if (Name.back() == 'd')
3489 IID = Intrinsic::x86_avx512_vfmadd_f64;
3490 else
3491 IID = Intrinsic::x86_avx512_vfmadd_f32;
3492 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3493 Rep = Builder.CreateCall(FMA, Ops);
3494 } else {
3495 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3496 Intrinsic::fma,
3497 A->getType());
3498 Rep = Builder.CreateCall(FMA, { A, B, C });
3499 }
3500
3501 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3502 IsMask3 ? C : A;
3503
3504 // For Mask3 with NegAcc, we need to create a new extractelement that
3505 // avoids the negation above.
3506 if (NegAcc && IsMask3)
3507 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3508 (uint64_t)0);
3509
3510 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
3511 Rep, PassThru);
3512 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3513 Rep, (uint64_t)0);
3514 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") ||
3515 Name.startswith("avx512.mask.vfnmadd.p") ||
3516 Name.startswith("avx512.mask.vfnmsub.p") ||
3517 Name.startswith("avx512.mask3.vfmadd.p") ||
3518 Name.startswith("avx512.mask3.vfmsub.p") ||
3519 Name.startswith("avx512.mask3.vfnmsub.p") ||
3520 Name.startswith("avx512.maskz.vfmadd.p"))) {
3521 bool IsMask3 = Name[11] == '3';
3522 bool IsMaskZ = Name[11] == 'z';
3523 // Drop the "avx512.mask." to make it easier.
3524 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3525 bool NegMul = Name[2] == 'n';
3526 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3527
3528 Value *A = CI->getArgOperand(0);
3529 Value *B = CI->getArgOperand(1);
3530 Value *C = CI->getArgOperand(2);
3531
3532 if (NegMul && (IsMask3 || IsMaskZ))
3533 A = Builder.CreateFNeg(A);
3534 if (NegMul && !(IsMask3 || IsMaskZ))
3535 B = Builder.CreateFNeg(B);
3536 if (NegAcc)
3537 C = Builder.CreateFNeg(C);
3538
3539 if (CI->arg_size() == 5 &&
3540 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3541 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3542 Intrinsic::ID IID;
3543 // Check the character before ".512" in the string.
3544 if (Name[Name.size()-5] == 's')
3545 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3546 else
3547 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3548
3549 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3550 { A, B, C, CI->getArgOperand(4) });
3551 } else {
3552 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3553 Intrinsic::fma,
3554 A->getType());
3555 Rep = Builder.CreateCall(FMA, { A, B, C });
3556 }
3557
3558 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3559 IsMask3 ? CI->getArgOperand(2) :
3560 CI->getArgOperand(0);
3561
3562 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3563 } else if (IsX86 && Name.startswith("fma.vfmsubadd.p")) {
3564 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3565 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3566 Intrinsic::ID IID;
3567 if (VecWidth == 128 && EltWidth == 32)
3568 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3569 else if (VecWidth == 256 && EltWidth == 32)
3570 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3571 else if (VecWidth == 128 && EltWidth == 64)
3572 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3573 else if (VecWidth == 256 && EltWidth == 64)
3574 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3575 else
3576 llvm_unreachable("Unexpected intrinsic");
3577
3578 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3579 CI->getArgOperand(2) };
3580 Ops[2] = Builder.CreateFNeg(Ops[2]);
3581 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3582 Ops);
3583 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
3584 Name.startswith("avx512.mask3.vfmaddsub.p") ||
3585 Name.startswith("avx512.maskz.vfmaddsub.p") ||
3586 Name.startswith("avx512.mask3.vfmsubadd.p"))) {
3587 bool IsMask3 = Name[11] == '3';
3588 bool IsMaskZ = Name[11] == 'z';
3589 // Drop the "avx512.mask." to make it easier.
3590 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3591 bool IsSubAdd = Name[3] == 's';
3592 if (CI->arg_size() == 5) {
3593 Intrinsic::ID IID;
3594 // Check the character before ".512" in the string.
3595 if (Name[Name.size()-5] == 's')
3596 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
3597 else
3598 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
3599
3600 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3601 CI->getArgOperand(2), CI->getArgOperand(4) };
3602 if (IsSubAdd)
3603 Ops[2] = Builder.CreateFNeg(Ops[2]);
3604
3605 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3606 Ops);
3607 } else {
3608 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3609
3610 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3611 CI->getArgOperand(2) };
3612
3613 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
3614 Ops[0]->getType());
3615 Value *Odd = Builder.CreateCall(FMA, Ops);
3616 Ops[2] = Builder.CreateFNeg(Ops[2]);
3617 Value *Even = Builder.CreateCall(FMA, Ops);
3618
3619 if (IsSubAdd)
3620 std::swap(Even, Odd);
3621
3622 SmallVector<int, 32> Idxs(NumElts);
3623 for (int i = 0; i != NumElts; ++i)
3624 Idxs[i] = i + (i % 2) * NumElts;
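// Worked example: for NumElts = 4, Idxs = {0,5,2,7} -- even lanes from Even
// (a*b-c) and odd lanes from Odd (a*b+c) for fmaddsub; the swap above flips
// this for the subadd variants.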
3625
3626 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
3627 }
3628
3629 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3630 IsMask3 ? CI->getArgOperand(2) :
3631 CI->getArgOperand(0);
3632
3633 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3634 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
3635 Name.startswith("avx512.maskz.pternlog."))) {
3636 bool ZeroMask = Name[11] == 'z';
3637 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3638 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3639 Intrinsic::ID IID;
3640 if (VecWidth == 128 && EltWidth == 32)
3641 IID = Intrinsic::x86_avx512_pternlog_d_128;
3642 else if (VecWidth == 256 && EltWidth == 32)
3643 IID = Intrinsic::x86_avx512_pternlog_d_256;
3644 else if (VecWidth == 512 && EltWidth == 32)
3645 IID = Intrinsic::x86_avx512_pternlog_d_512;
3646 else if (VecWidth == 128 && EltWidth == 64)
3647 IID = Intrinsic::x86_avx512_pternlog_q_128;
3648 else if (VecWidth == 256 && EltWidth == 64)
3649 IID = Intrinsic::x86_avx512_pternlog_q_256;
3650 else if (VecWidth == 512 && EltWidth == 64)
3651 IID = Intrinsic::x86_avx512_pternlog_q_512;
3652 else
3653 llvm_unreachable("Unexpected intrinsic");
3654
3655 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3656 CI->getArgOperand(2), CI->getArgOperand(3) };
3657 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3658 Args);
3659 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3660 : CI->getArgOperand(0);
3661 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
3662 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
3663 Name.startswith("avx512.maskz.vpmadd52"))) {
3664 bool ZeroMask = Name[11] == 'z';
3665 bool High = Name[20] == 'h' || Name[21] == 'h';
3666 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3667 Intrinsic::ID IID;
3668 if (VecWidth == 128 && !High)
3669 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
3670 else if (VecWidth == 256 && !High)
3671 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
3672 else if (VecWidth == 512 && !High)
3673 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
3674 else if (VecWidth == 128 && High)
3675 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
3676 else if (VecWidth == 256 && High)
3677 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
3678 else if (VecWidth == 512 && High)
3679 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
3680 else
3681 llvm_unreachable("Unexpected intrinsic");
3682
3683 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3684 CI->getArgOperand(2) };
3685 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3686 Args);
3687 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3688 : CI->getArgOperand(0);
3689 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3690 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
3691 Name.startswith("avx512.mask.vpermt2var.") ||
3692 Name.startswith("avx512.maskz.vpermt2var."))) {
3693 bool ZeroMask = Name[11] == 'z';
3694 bool IndexForm = Name[17] == 'i';
3695 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
3696 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
3697 Name.startswith("avx512.maskz.vpdpbusd.") ||
3698 Name.startswith("avx512.mask.vpdpbusds.") ||
3699 Name.startswith("avx512.maskz.vpdpbusds."))) {
3700 bool ZeroMask = Name[11] == 'z';
3701 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3702 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3703 Intrinsic::ID IID;
3704 if (VecWidth == 128 && !IsSaturating)
3705 IID = Intrinsic::x86_avx512_vpdpbusd_128;
3706 else if (VecWidth == 256 && !IsSaturating)
3707 IID = Intrinsic::x86_avx512_vpdpbusd_256;
3708 else if (VecWidth == 512 && !IsSaturating)
3709 IID = Intrinsic::x86_avx512_vpdpbusd_512;
3710 else if (VecWidth == 128 && IsSaturating)
3711 IID = Intrinsic::x86_avx512_vpdpbusds_128;
3712 else if (VecWidth == 256 && IsSaturating)
3713 IID = Intrinsic::x86_avx512_vpdpbusds_256;
3714 else if (VecWidth == 512 && IsSaturating)
3715 IID = Intrinsic::x86_avx512_vpdpbusds_512;
3716 else
3717 llvm_unreachable("Unexpected intrinsic");
3718
3719 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3720 CI->getArgOperand(2) };
3721 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3722 Args);
3723 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3724 : CI->getArgOperand(0);
3725 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3726 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
3727 Name.startswith("avx512.maskz.vpdpwssd.") ||
3728 Name.startswith("avx512.mask.vpdpwssds.") ||
3729 Name.startswith("avx512.maskz.vpdpwssds."))) {
3730 bool ZeroMask = Name[11] == 'z';
3731 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3732 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3733 Intrinsic::ID IID;
3734 if (VecWidth == 128 && !IsSaturating)
3735 IID = Intrinsic::x86_avx512_vpdpwssd_128;
3736 else if (VecWidth == 256 && !IsSaturating)
3737 IID = Intrinsic::x86_avx512_vpdpwssd_256;
3738 else if (VecWidth == 512 && !IsSaturating)
3739 IID = Intrinsic::x86_avx512_vpdpwssd_512;
3740 else if (VecWidth == 128 && IsSaturating)
3741 IID = Intrinsic::x86_avx512_vpdpwssds_128;
3742 else if (VecWidth == 256 && IsSaturating)
3743 IID = Intrinsic::x86_avx512_vpdpwssds_256;
3744 else if (VecWidth == 512 && IsSaturating)
3745 IID = Intrinsic::x86_avx512_vpdpwssds_512;
3746 else
3747 llvm_unreachable("Unexpected intrinsic");
3748
3749 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3750 CI->getArgOperand(2) };
3751 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3752 Args);
3753 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3754 : CI->getArgOperand(0);
3755 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3756 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
3757 Name == "addcarry.u32" || Name == "addcarry.u64" ||
3758 Name == "subborrow.u32" || Name == "subborrow.u64")) {
3759 Intrinsic::ID IID;
3760 if (Name[0] == 'a' && Name.back() == '2')
3761 IID = Intrinsic::x86_addcarry_32;
3762 else if (Name[0] == 'a' && Name.back() == '4')
3763 IID = Intrinsic::x86_addcarry_64;
3764 else if (Name[0] == 's' && Name.back() == '2')
3765 IID = Intrinsic::x86_subborrow_32;
3766 else if (Name[0] == 's' && Name.back() == '4')
3767 IID = Intrinsic::x86_subborrow_64;
3768 else
3769 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/IR/AutoUpgrade.cpp"
, 3769)
;
3770
3771 // Make a call with 3 operands.
3772 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3773 CI->getArgOperand(2)};
3774 Value *NewCall = Builder.CreateCall(
3775 Intrinsic::getDeclaration(CI->getModule(), IID),
3776 Args);
3777
3778 // Extract the second result and store it.
3779 Value *Data = Builder.CreateExtractValue(NewCall, 1);
3780 // Cast the pointer to the right type.
3781 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
3782 llvm::PointerType::getUnqual(Data->getType()));
3783 Builder.CreateAlignedStore(Data, Ptr, Align(1));
3784 // Replace the original call result with the first result of the new call.
3785 Value *CF = Builder.CreateExtractValue(NewCall, 0);
3786
3787 CI->replaceAllUsesWith(CF);
3788 Rep = nullptr;
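// Illustrative sketch (hypothetical operands): the old pointer-out-param form
//   %cf.old = call i8 @llvm.x86.addcarryx.u32(i8 %c, i32 %a, i32 %b, i8* %p)
// is rebuilt roughly as
//   %pair = call { i8, i32 } @llvm.x86.addcarry.32(i8 %c, i32 %a, i32 %b)
//   %sum = extractvalue { i8, i32 } %pair, 1
//   store i32 %sum, i32* %p2, align 1       ; %p2 is %p cast to i32*
//   %cf = extractvalue { i8, i32 } %pair, 0 ; replaces the old i8 result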
3789 } else if (IsX86 && Name.startswith("avx512.mask.") &&
3790 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
3791 // Rep will be updated by the call in the condition.
3792 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
3793 Value *Arg = CI->getArgOperand(0);
3794 Value *Neg = Builder.CreateNeg(Arg, "neg");
3795 Value *Cmp = Builder.CreateICmpSGE(
3796 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
3797 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
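// Illustrative sketch: for llvm.nvvm.abs.i the code above emits
//   %neg = sub i32 0, %x
//   %abs.cond = icmp sge i32 %x, 0
//   %abs = select i1 %abs.cond, i32 %x, i32 %neg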
3798 } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
3799 Name.startswith("atomic.load.add.f64.p"))) {
3800 Value *Ptr = CI->getArgOperand(0);
3801 Value *Val = CI->getArgOperand(1);
3802 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
3803 AtomicOrdering::SequentiallyConsistent);
3804 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
3805 Name == "max.ui" || Name == "max.ull")) {
3806 Value *Arg0 = CI->getArgOperand(0);
3807 Value *Arg1 = CI->getArgOperand(1);
3808 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3809 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
3810 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
3811 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
3812 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
3813 Name == "min.ui" || Name == "min.ull")) {
3814 Value *Arg0 = CI->getArgOperand(0);
3815 Value *Arg1 = CI->getArgOperand(1);
3816 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3817 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
3818 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
3819 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
3820 } else if (IsNVVM && Name == "clz.ll") {
3821 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
3822 Value *Arg = CI->getArgOperand(0);
3823 Value *Ctlz = Builder.CreateCall(
3824 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
3825 {Arg->getType()}),
3826 {Arg, Builder.getFalse()}, "ctlz");
3827 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
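// Illustrative sketch: llvm.nvvm.clz.ll is rebuilt as
//   %ctlz = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
//   %ctlz.trunc = trunc i64 %ctlz to i32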
3828 } else if (IsNVVM && Name == "popc.ll") {
3829 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
3830 // i64.
3831 Value *Arg = CI->getArgOperand(0);
3832 Value *Popc = Builder.CreateCall(
3833 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
3834 {Arg->getType()}),
3835 Arg, "ctpop");
3836 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
3837 } else if (IsNVVM && Name == "h2f") {
3838 Rep = Builder.CreateCall(Intrinsic::getDeclaration(
3839 F->getParent(), Intrinsic::convert_from_fp16,
3840 {Builder.getFloatTy()}),
3841 CI->getArgOperand(0), "h2f");
3842 } else if (IsARM) {
3843 Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder);
3844 } else {
3845 llvm_unreachable("Unknown function for CallBase upgrade.")::llvm::llvm_unreachable_internal("Unknown function for CallBase upgrade."
, "llvm/lib/IR/AutoUpgrade.cpp", 3845)
;
3846 }
3847
3848 if (Rep)
3849 CI->replaceAllUsesWith(Rep);
3850 CI->eraseFromParent();
3851 return;
3852 }
3853
3854 const auto &DefaultCase = [&]() -> void {
3855 if (CI->getFunctionType() == NewFn->getFunctionType()) {
3856 // Handle generic mangling change.
3857 assert(
3858 (CI->getCalledFunction()->getName() != NewFn->getName()) &&
3859 "Unknown function for CallBase upgrade and isn't just a name change");
3860 CI->setCalledFunction(NewFn);
3861 return;
3862 }
3863
3864 // This must be an upgrade from a named to a literal struct.
3865 auto *OldST = cast<StructType>(CI->getType());
3866 assert(OldST != NewFn->getReturnType() && "Return type must have changed");
3867 assert(OldST->getNumElements() ==
3868 cast<StructType>(NewFn->getReturnType())->getNumElements() &&
3869 "Must have same number of elements");
3870
3871 SmallVector<Value *> Args(CI->args());
3872 Value *NewCI = Builder.CreateCall(NewFn, Args);
3873 Value *Res = PoisonValue::get(OldST);
3874 for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
3875 Value *Elem = Builder.CreateExtractValue(NewCI, Idx);
3876 Res = Builder.CreateInsertValue(Res, Elem, Idx);
3877 }
3878 CI->replaceAllUsesWith(Res);
3879 CI->eraseFromParent();
3880 return;
3881 };
3882 CallInst *NewCall = nullptr;
3883 switch (NewFn->getIntrinsicID()) {
3884 default: {
3885 DefaultCase();
3886 return;
3887 }
3888 case Intrinsic::arm_neon_vst1:
3889 case Intrinsic::arm_neon_vst2:
3890 case Intrinsic::arm_neon_vst3:
3891 case Intrinsic::arm_neon_vst4:
3892 case Intrinsic::arm_neon_vst2lane:
3893 case Intrinsic::arm_neon_vst3lane:
3894 case Intrinsic::arm_neon_vst4lane: {
3895 SmallVector<Value *, 4> Args(CI->args());
3896 NewCall = Builder.CreateCall(NewFn, Args);
3897 break;
3898 }
3899 case Intrinsic::aarch64_sve_ld3_sret:
3900 case Intrinsic::aarch64_sve_ld4_sret:
3901 case Intrinsic::aarch64_sve_ld2_sret: {
3902 StringRef Name = F->getName();
3903 Name = Name.substr(5);
3904 unsigned N = StringSwitch<unsigned>(Name)
3905 .StartsWith("aarch64.sve.ld2", 2)
3906 .StartsWith("aarch64.sve.ld3", 3)
3907 .StartsWith("aarch64.sve.ld4", 4)
3908 .Default(0);
3909 ScalableVectorType *RetTy =
3910 dyn_cast<ScalableVectorType>(F->getReturnType());
3911 unsigned MinElts = RetTy->getMinNumElements() / N;
3912 SmallVector<Value *, 2> Args(CI->args());
3913 Value *NewLdCall = Builder.CreateCall(NewFn, Args);
3914 Value *Ret = llvm::PoisonValue::get(RetTy);
3915 for (unsigned I = 0; I < N; I++) {
3916 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
3917 Value *SRet = Builder.CreateExtractValue(NewLdCall, I);
3918 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
3919 }
3920 NewCall = dyn_cast<CallInst>(Ret);
3921 break;
3922 }
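// Illustrative sketch (hypothetical types): for an old llvm.aarch64.sve.ld2
// returning <vscale x 16 x i16>, the new ld2.sret call yields a two-element
// struct of <vscale x 8 x i16>, and the loop above reassembles the wide
// vector with llvm.vector.insert at element offsets 0 and 8 (I * MinElts).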
3923
3924 case Intrinsic::vector_extract: {
3925 StringRef Name = F->getName();
3926 Name = Name.substr(5); // Strip llvm
3927 if (!Name.startswith("aarch64.sve.tuple.get")) {
3928 DefaultCase();
3929 return;
3930 }
3931 ScalableVectorType *RetTy =
3932 dyn_cast<ScalableVectorType>(F->getReturnType());
3933 unsigned MinElts = RetTy->getMinNumElements();
3934 unsigned I = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3935 Value *NewIdx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
3936 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0), NewIdx});
3937 break;
3938 }
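// Illustrative sketch (hypothetical types): llvm.aarch64.sve.tuple.get with
// index 1 and a <vscale x 4 x i32> result becomes llvm.vector.extract with
// the element offset scaled to i64 4 (I * MinElts).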
3939
3940 case Intrinsic::vector_insert: {
3941 StringRef Name = F->getName();
3942 Name = Name.substr(5);
3943 if (!Name.startswith("aarch64.sve.tuple")) {
3944 DefaultCase();
3945 return;
3946 }
3947 if (Name.startswith("aarch64.sve.tuple.set")) {
3948 unsigned I = dyn_cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3949 ScalableVectorType *Ty =
3950 dyn_cast<ScalableVectorType>(CI->getArgOperand(2)->getType());
3951 Value *NewIdx =
3952 ConstantInt::get(Type::getInt64Ty(C), I * Ty->getMinNumElements());
3953 NewCall = Builder.CreateCall(
3954 NewFn, {CI->getArgOperand(0), CI->getArgOperand(2), NewIdx});
3955 break;
3956 }
3957 if (Name.startswith("aarch64.sve.tuple.create")) {
3958 unsigned N = StringSwitch<unsigned>(Name)
3959 .StartsWith("aarch64.sve.tuple.create2", 2)
3960 .StartsWith("aarch64.sve.tuple.create3", 3)
3961 .StartsWith("aarch64.sve.tuple.create4", 4)
3962 .Default(0);
3963 assert(N > 1 && "Create is expected to be between 2-4");
3964 ScalableVectorType *RetTy =
3965 dyn_cast<ScalableVectorType>(F->getReturnType());
3966 Value *Ret = llvm::PoisonValue::get(RetTy);
3967 unsigned MinElts = RetTy->getMinNumElements() / N;
3968 for (unsigned I = 0; I < N; I++) {
3969 Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
3970 Value *V = CI->getArgOperand(I);
3971 Ret = Builder.CreateInsertVector(RetTy, Ret, V, Idx);
3972 }
3973 NewCall = dyn_cast<CallInst>(Ret);
3974 }
3975 break;
3976 }
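// Illustrative sketch (hypothetical types): tuple.create2 of two
// <vscale x 4 x i32> parts builds a <vscale x 8 x i32> result via
// llvm.vector.insert at offsets 0 and 4; tuple.set likewise rescales its
// element index by the inserted vector's minimum element count.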
3977
3978 case Intrinsic::arm_neon_bfdot:
3979 case Intrinsic::arm_neon_bfmmla:
3980 case Intrinsic::arm_neon_bfmlalb:
3981 case Intrinsic::arm_neon_bfmlalt:
3982 case Intrinsic::aarch64_neon_bfdot:
3983 case Intrinsic::aarch64_neon_bfmmla:
3984 case Intrinsic::aarch64_neon_bfmlalb:
3985 case Intrinsic::aarch64_neon_bfmlalt: {
3986 SmallVector<Value *, 3> Args;
3987 assert(CI->arg_size() == 3 &&
3988 "Mismatch between function args and call args");
3989 size_t OperandWidth =
3990 CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
3991 assert((OperandWidth == 64 || OperandWidth == 128) &&
3992 "Unexpected operand width");
3993 Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
3994 auto Iter = CI->args().begin();
3995 Args.push_back(*Iter++);
3996 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
3997 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
3998 NewCall = Builder.CreateCall(NewFn, Args);
3999 break;
4000 }
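// Illustrative sketch: for a 128-bit second operand, NewTy above is
// <8 x bfloat> (OperandWidth / 16 lanes), so operands 1 and 2 are bitcast
// from their old element type to bfloat vectors before the call.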
4001
4002 case Intrinsic::bitreverse:
4003 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4004 break;
4005
4006 case Intrinsic::ctlz:
4007 case Intrinsic::cttz:
4008 assert(CI->arg_size() == 1 &&
4009 "Mismatch between function args and call args");
4010 NewCall =
4011 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
4012 break;
4013
4014 case Intrinsic::objectsize: {
4015 Value *NullIsUnknownSize =
4016 CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
4017 Value *Dynamic =
4018 CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
4019 NewCall = Builder.CreateCall(
4020 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
4021 break;
4022 }
4023
4024 case Intrinsic::ctpop:
4025 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4026 break;
4027
4028 case Intrinsic::convert_from_fp16:
4029 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
4030 break;
4031
4032 case Intrinsic::dbg_value:
4033 // Upgrade from the old version that had an extra offset argument.
4034 assert(CI->arg_size() == 4);
4035 // Drop nonzero offsets instead of attempting to upgrade them.
4036 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
4037 if (Offset->isZeroValue()) {
4038 NewCall = Builder.CreateCall(
4039 NewFn,
4040 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
4041 break;
4042 }
4043 CI->eraseFromParent();
4044 return;
4045
4046 case Intrinsic::ptr_annotation:
4047 // Upgrade from versions that lacked the annotation attribute argument.
4048 if (CI->arg_size() != 4) {
4049 DefaultCase();
4050 return;
4051 }
4052
4053 // Create a new call with an added null annotation attribute argument.
4054 NewCall = Builder.CreateCall(
4055 NewFn,
4056 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
4057 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
4058 NewCall->takeName(CI);
4059 CI->replaceAllUsesWith(NewCall);
4060 CI->eraseFromParent();
4061 return;
4062
4063 case Intrinsic::var_annotation:
4064 // Upgrade from versions that lacked the annotation attribute argument.
4065 assert(CI->arg_size() == 4 &&
4066 "Before LLVM 12.0 this intrinsic took four arguments");
4067 // Create a new call with an added null annotation attribute argument.
4068 NewCall = Builder.CreateCall(
4069 NewFn,
4070 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
4071 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
4072 CI->eraseFromParent();
4073 return;
4074
4075 case Intrinsic::x86_xop_vfrcz_ss:
4076 case Intrinsic::x86_xop_vfrcz_sd:
4077 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
4078 break;
4079
4080 case Intrinsic::x86_xop_vpermil2pd:
4081 case Intrinsic::x86_xop_vpermil2ps:
4082 case Intrinsic::x86_xop_vpermil2pd_256:
4083 case Intrinsic::x86_xop_vpermil2ps_256: {
4084 SmallVector<Value *, 4> Args(CI->args());
4085 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
4086 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
4087 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
4088 NewCall = Builder.CreateCall(NewFn, Args);
4089 break;
4090 }
4091
4092 case Intrinsic::x86_sse41_ptestc:
4093 case Intrinsic::x86_sse41_ptestz:
4094 case Intrinsic::x86_sse41_ptestnzc: {
4095 // The arguments for these intrinsics used to be v4f32, and changed
4096 // to v2i64. This is purely a nop, since those are bitwise intrinsics.
4097 // So, the only thing required is a bitcast for both arguments.
4098 // First, check the arguments have the old type.
4099 Value *Arg0 = CI->getArgOperand(0);
4100 if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
4101 return;
4102
4103 // Old intrinsic, add bitcasts
4104 Value *Arg1 = CI->getArgOperand(1);
4105
4106 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
4107
4108 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
4109 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
4110
4111 NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
4112 break;
4113 }
4114
4115 case Intrinsic::x86_rdtscp: {
4116 // This used to take 1 argument. If we have no arguments, it is already
4117 // upgraded.
4118 if (CI->getNumOperands() == 0)
4119 return;
4120
4121 NewCall = Builder.CreateCall(NewFn);
4122 // Extract the second result and store it.
4123 Value *Data = Builder.CreateExtractValue(NewCall, 1);
4124 // Cast the pointer to the right type.
4125 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
4126 llvm::PointerType::getUnqual(Data->getType()));
4127 Builder.CreateAlignedStore(Data, Ptr, Align(1));
4128 // Replace the original call result with the first result of the new call.
4129 Value *TSC = Builder.CreateExtractValue(NewCall, 0);
4130
4131 NewCall->takeName(CI);
4132 CI->replaceAllUsesWith(TSC);
4133 CI->eraseFromParent();
4134 return;
4135 }
4136
4137 case Intrinsic::x86_sse41_insertps:
4138 case Intrinsic::x86_sse41_dppd:
4139 case Intrinsic::x86_sse41_dpps:
4140 case Intrinsic::x86_sse41_mpsadbw:
4141 case Intrinsic::x86_avx_dp_ps_256:
4142 case Intrinsic::x86_avx2_mpsadbw: {
4143 // Need to truncate the last argument from i32 to i8 -- this argument models
4144 // an inherently 8-bit immediate operand to these x86 instructions.
4145 SmallVector<Value *, 4> Args(CI->args());
4146
4147 // Replace the last argument with a trunc.
4148 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
4149 NewCall = Builder.CreateCall(NewFn, Args);
4150 break;
4151 }
4152
4153 case Intrinsic::x86_avx512_mask_cmp_pd_128:
4154 case Intrinsic::x86_avx512_mask_cmp_pd_256:
4155 case Intrinsic::x86_avx512_mask_cmp_pd_512:
4156 case Intrinsic::x86_avx512_mask_cmp_ps_128:
4157 case Intrinsic::x86_avx512_mask_cmp_ps_256:
4158 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
4159 SmallVector<Value *, 4> Args(CI->args());
4160 unsigned NumElts =
4161 cast<FixedVectorType>(Args[0]->getType())->getNumElements();
4162 Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
4163
4164 NewCall = Builder.CreateCall(NewFn, Args);
4165 Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
4166
4167 NewCall->takeName(CI);
4168 CI->replaceAllUsesWith(Res);
4169 CI->eraseFromParent();
4170 return;
4171 }
4172
4173 case Intrinsic::thread_pointer: {
4174 NewCall = Builder.CreateCall(NewFn, {});
4175 break;
4176 }
4177
4178 case Intrinsic::invariant_start:
4179 case Intrinsic::invariant_end: {
4180 SmallVector<Value *, 4> Args(CI->args());
4181 NewCall = Builder.CreateCall(NewFn, Args);
4182 break;
4183 }
4184 case Intrinsic::masked_load:
4185 case Intrinsic::masked_store:
4186 case Intrinsic::masked_gather:
4187 case Intrinsic::masked_scatter: {
4188 SmallVector<Value *, 4> Args(CI->args());
4189 NewCall = Builder.CreateCall(NewFn, Args);
4190 NewCall->copyMetadata(*CI);
4191 break;
4192 }
4193
4194 case Intrinsic::memcpy:
4195 case Intrinsic::memmove:
4196 case Intrinsic::memset: {
4197 // We have to make sure that the call signature is what we're expecting.
4198 // We only want to change the old signatures by removing the alignment arg:
4199 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
4200 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
4201 // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
4202 // -> @llvm.memset...(i8*, i8, i[32|64], i1)
4203 // Note: i8*'s in the above can be any pointer type
4204 if (CI->arg_size() != 5) {
4205 DefaultCase();
4206 return;
4207 }
4208 // Remove alignment argument (3), and add alignment attributes to the
4209 // dest/src pointers.
4210 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
4211 CI->getArgOperand(2), CI->getArgOperand(4)};
4212 NewCall = Builder.CreateCall(NewFn, Args);
4213 AttributeList OldAttrs = CI->getAttributes();
4214 AttributeList NewAttrs = AttributeList::get(
4215 C, OldAttrs.getFnAttrs(), OldAttrs.getRetAttrs(),
4216 {OldAttrs.getParamAttrs(0), OldAttrs.getParamAttrs(1),
4217 OldAttrs.getParamAttrs(2), OldAttrs.getParamAttrs(4)});
4218 NewCall->setAttributes(NewAttrs);
4219 auto *MemCI = cast<MemIntrinsic>(NewCall);
4220 // All mem intrinsics support dest alignment.
4221 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
4222 MemCI->setDestAlignment(Align->getMaybeAlignValue());
4223 // Memcpy/Memmove also support source alignment.
4224 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
4225 MTI->setSourceAlignment(Align->getMaybeAlignValue());
4226 break;
4227 }
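// Illustrative sketch (hypothetical operands):
//   call void @llvm.memcpy...(i8* %d, i8* %s, i64 %n, i32 16, i1 false)
// becomes
//   call void @llvm.memcpy...(i8* align 16 %d, i8* align 16 %s, i64 %n, i1 false)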
4228 }
4229 assert(NewCall && "Should have either set this variable or returned through "
4230 "the default case");
4231 NewCall->takeName(CI);
4232 CI->replaceAllUsesWith(NewCall);
4233 CI->eraseFromParent();
4234}
4235
4236void llvm::UpgradeCallsToIntrinsic(Function *F) {
4237 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
4238
4239 // Check if this function should be upgraded and get the replacement function
4240 // if there is one.
4241 Function *NewFn;
4242 if (UpgradeIntrinsicFunction(F, NewFn)) {
4243 // Replace all users of the old function with the new function or new
4244 // instructions. This is not a range loop because the call is deleted.
4245 for (User *U : make_early_inc_range(F->users()))
4246 if (CallBase *CB = dyn_cast<CallBase>(U))
4247 UpgradeIntrinsicCall(CB, NewFn);
4248
4249 // Remove old function, no longer used, from the module.
4250 F->eraseFromParent();
4251 }
4252}
4253
4254MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
4255 // Check if the tag uses struct-path aware TBAA format.
4256 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
4257 return &MD;
4258
4259 auto &Context = MD.getContext();
4260 if (MD.getNumOperands() == 3) {
4261 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
4262 MDNode *ScalarType = MDNode::get(Context, Elts);
4263 // Create a MDNode <ScalarType, ScalarType, offset 0, const>
4264 Metadata *Elts2[] = {ScalarType, ScalarType,
4265 ConstantAsMetadata::get(
4266 Constant::getNullValue(Type::getInt64Ty(Context))),
4267 MD.getOperand(2)};
4268 return MDNode::get(Context, Elts2);
4269 }
4270 // Create a MDNode <MD, MD, offset 0>
4271 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
4272 Type::getInt64Ty(Context)))};
4273 return MDNode::get(Context, Elts);
4274}
4275
4276Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
4277 Instruction *&Temp) {
4278 if (Opc != Instruction::BitCast)
4279 return nullptr;
4280
4281 Temp = nullptr;
4282 Type *SrcTy = V->getType();
4283 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4284 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4285 LLVMContext &Context = V->getContext();
4286
4287 // We have no information about target data layout, so we assume that
4288 // the maximum pointer size is 64bit.
4289 Type *MidTy = Type::getInt64Ty(Context);
4290 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
4291
4292 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
4293 }
4294
4295 return nullptr;
4296}
4297
4298Constant *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
4299 if (Opc != Instruction::BitCast)
4300 return nullptr;
4301
4302 Type *SrcTy = C->getType();
4303 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4304 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4305 LLVMContext &Context = C->getContext();
4306
4307 // We have no information about target data layout, so we assume that
4308 // the maximum pointer size is 64bit.
4309 Type *MidTy = Type::getInt64Ty(Context);
4310
4311 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
4312 DestTy);
4313 }
4314
4315 return nullptr;
4316}
4317
4318/// Check the debug info version number, if it is out-dated, drop the debug
4319/// info. Return true if module is modified.
4320bool llvm::UpgradeDebugInfo(Module &M) {
4321 unsigned Version = getDebugMetadataVersionFromModule(M);
4322 if (Version == DEBUG_METADATA_VERSION) {
4323 bool BrokenDebugInfo = false;
4324 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
4325 report_fatal_error("Broken module found, compilation aborted!");
4326 if (!BrokenDebugInfo)
4327 // Everything is ok.
4328 return false;
4329 else {
4330 // Diagnose malformed debug info.
4331 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
4332 M.getContext().diagnose(Diag);
4333 }
4334 }
4335 bool Modified = StripDebugInfo(M);
4336 if (Modified && Version != DEBUG_METADATA_VERSION) {
4337 // Diagnose a version mismatch.
4338 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
4339 M.getContext().diagnose(DiagVersion);
4340 }
4341 return Modified;
4342}
4343
4344/// This checks for objc retain release marker which should be upgraded. It
4345/// returns true if module is modified.
4346static bool UpgradeRetainReleaseMarker(Module &M) {
4347 bool Changed = false;
4348 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
4349 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
4350 if (ModRetainReleaseMarker) {
4351 MDNode *Op = ModRetainReleaseMarker->getOperand(0);
4352 if (Op) {
4353 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
4354 if (ID) {
4355 SmallVector<StringRef, 4> ValueComp;
4356 ID->getString().split(ValueComp, "#");
4357 if (ValueComp.size() == 2) {
4358 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
4359 ID = MDString::get(M.getContext(), NewValue);
4360 }
4361 M.addModuleFlag(Module::Error, MarkerKey, ID);
4362 M.eraseNamedMetadata(ModRetainReleaseMarker);
4363 Changed = true;
4364 }
4365 }
4366 }
4367 return Changed;
4368}
4369
4370void llvm::UpgradeARCRuntime(Module &M) {
4371 // This lambda converts normal function calls to ARC runtime functions to
4372 // intrinsic calls.
4373 auto UpgradeToIntrinsic = [&](const char *OldFunc,
4374 llvm::Intrinsic::ID IntrinsicFunc) {
4375 Function *Fn = M.getFunction(OldFunc);
4376
4377 if (!Fn)
4378 return;
4379
4380 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
4381
4382 for (User *U : make_early_inc_range(Fn->users())) {
4383 CallInst *CI = dyn_cast<CallInst>(U);
4384 if (!CI || CI->getCalledFunction() != Fn)
4385 continue;
4386
4387 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
4388 FunctionType *NewFuncTy = NewFn->getFunctionType();
4389 SmallVector<Value *, 2> Args;
4390
4391 // Don't upgrade the intrinsic if it's not valid to bitcast the return
4392 // value to the return type of the old function.
4393 if (NewFuncTy->getReturnType() != CI->getType() &&
4394 !CastInst::castIsValid(Instruction::BitCast, CI,
4395 NewFuncTy->getReturnType()))
4396 continue;
4397
4398 bool InvalidCast = false;
4399
4400 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
4401 Value *Arg = CI->getArgOperand(I);
4402
4403 // Bitcast argument to the parameter type of the new function if it's
4404 // not a variadic argument.
4405 if (I < NewFuncTy->getNumParams()) {
4406 // Don't upgrade the intrinsic if it's not valid to bitcast the argument
4407 // to the parameter type of the new function.
4408 if (!CastInst::castIsValid(Instruction::BitCast, Arg,
4409 NewFuncTy->getParamType(I))) {
4410 InvalidCast = true;
4411 break;
4412 }
4413 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
4414 }
4415 Args.push_back(Arg);
4416 }
4417
4418 if (InvalidCast)
4419 continue;
4420
4421 // Create a call instruction that calls the new function.
4422 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
4423 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
4424 NewCall->takeName(CI);
4425
4426 // Bitcast the return value back to the type of the old call.
4427 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
4428
4429 if (!CI->use_empty())
4430 CI->replaceAllUsesWith(NewRetVal);
4431 CI->eraseFromParent();
4432 }
4433
4434 if (Fn->use_empty())
4435 Fn->eraseFromParent();
4436 };
4437
4438 // Unconditionally convert a call to "clang.arc.use" to a call to
4439 // "llvm.objc.clang.arc.use".
4440 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
4441
4442 // Upgrade the retain release marker. If there is no need to upgrade
4443 // the marker, that means either the module is already new enough to contain
4444 // new intrinsics or it is not ARC. There is no need to upgrade the runtime calls.
4445 if (!UpgradeRetainReleaseMarker(M))
4446 return;
4447
4448 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
4449 {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
4450 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
4451 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
4452 {"objc_autoreleaseReturnValue",
4453 llvm::Intrinsic::objc_autoreleaseReturnValue},
4454 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
4455 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
4456 {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
4457 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
4458 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
4459 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
4460 {"objc_release", llvm::Intrinsic::objc_release},
4461 {"objc_retain", llvm::Intrinsic::objc_retain},
4462 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
4463 {"objc_retainAutoreleaseReturnValue",
4464 llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
4465 {"objc_retainAutoreleasedReturnValue",
4466 llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
4467 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
4468 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
4469 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
4470 {"objc_unsafeClaimAutoreleasedReturnValue",
4471 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
4472 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
4473 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
4474 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
4475 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
4476 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
4477 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
4478 {"objc_arc_annotation_topdown_bbstart",
4479 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
4480 {"objc_arc_annotation_topdown_bbend",
4481 llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
4482 {"objc_arc_annotation_bottomup_bbstart",
4483 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
4484 {"objc_arc_annotation_bottomup_bbend",
4485 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
4486
4487 for (auto &I : RuntimeFuncs)
4488 UpgradeToIntrinsic(I.first, I.second);
4489}
4490
4491bool llvm::UpgradeModuleFlags(Module &M) {
4492 NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
4493 if (!ModFlags)
4494 return false;
4495
4496 bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
4497 bool HasSwiftVersionFlag = false;
4498 uint8_t SwiftMajorVersion, SwiftMinorVersion;
4499 uint32_t SwiftABIVersion;
4500 auto Int8Ty = Type::getInt8Ty(M.getContext());
4501 auto Int32Ty = Type::getInt32Ty(M.getContext());
4502
4503 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
4504 MDNode *Op = ModFlags->getOperand(I);
4505 if (Op->getNumOperands() != 3)
4506 continue;
4507 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
4508 if (!ID)
4509 continue;
4510 auto SetBehavior = [&](Module::ModFlagBehavior B) {
4511 Metadata *Ops[3] = {ConstantAsMetadata::get(ConstantInt::get(
4512 Type::getInt32Ty(M.getContext()), B)),
4513 MDString::get(M.getContext(), ID->getString()),
4514 Op->getOperand(2)};
4515 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4516 Changed = true;
4517 };
4518
4519 if (ID->getString() == "Objective-C Image Info Version")
4520 HasObjCFlag = true;
4521 if (ID->getString() == "Objective-C Class Properties")
4522 HasClassProperties = true;
4523 // Upgrade PIC from Error/Max to Min.
4524 if (ID->getString() == "PIC Level") {
4525 if (auto *Behavior =
4526 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4527 uint64_t V = Behavior->getLimitedValue();
4528 if (V == Module::Error || V == Module::Max)
4529 SetBehavior(Module::Min);
4530 }
4531 }
4532 // Upgrade "PIE Level" from Error to Max.
4533 if (ID->getString() == "PIE Level")
4534 if (auto *Behavior =
4535 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)))
4536 if (Behavior->getLimitedValue() == Module::Error)
4537 SetBehavior(Module::Max);
4538
4539 // Upgrade branch protection and return address signing module flags. The
4540 // module flag behavior for these fields was Error and is now Min.
4541 if (ID->getString() == "branch-target-enforcement" ||
4542 ID->getString().startswith("sign-return-address")) {
4543 if (auto *Behavior =
4544 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4545 if (Behavior->getLimitedValue() == Module::Error) {
4546 Type *Int32Ty = Type::getInt32Ty(M.getContext());
4547 Metadata *Ops[3] = {
4548 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Min)),
4549 Op->getOperand(1), Op->getOperand(2)};
4550 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4551 Changed = true;
4552 }
4553 }
4554 }
4555
4556 // Upgrade the Objective-C Image Info Section. Remove the whitespace in the
4557 // section name so that llvm-lto will not complain about mismatching
4558 // module flags that are functionally the same.
4559 if (ID->getString() == "Objective-C Image Info Section") {
4560 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
4561 SmallVector<StringRef, 4> ValueComp;
4562 Value->getString().split(ValueComp, " ");
4563 if (ValueComp.size() != 1) {
4564 std::string NewValue;
4565 for (auto &S : ValueComp)
4566 NewValue += S.str();
4567 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
4568 MDString::get(M.getContext(), NewValue)};
4569 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4570 Changed = true;
4571 }
4572 }
4573 }
4574
4575 // IRUpgrader turns an i32-typed "Objective-C Garbage Collection" into an i8
4576 // value. If the higher bits are set, it adds a new module flag for Swift info.
4577 if (ID->getString() == "Objective-C Garbage Collection") {
4578 auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
4579 if (Md) {
4580 assert(Md->getValue() && "Expected non-empty metadata");
4581 auto Type = Md->getValue()->getType();
4582 if (Type == Int8Ty)
4583 continue;
4584 unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
4585 if ((Val & 0xff) != Val) {
4586 HasSwiftVersionFlag = true;
4587 SwiftABIVersion = (Val & 0xff00) >> 8;
4588 SwiftMajorVersion = (Val & 0xff000000) >> 24;
4589 SwiftMinorVersion = (Val & 0xff0000) >> 16;
4590 }
4591 Metadata *Ops[3] = {
4592 ConstantAsMetadata::get(ConstantInt::get(Int32Ty,Module::Error)),
4593 Op->getOperand(1),
4594 ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
4595 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4596 Changed = true;
4597 }
4598 }
4599 }
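// Illustrative sketch (hypothetical value): a packed i32 value 0x01020150
// yields SwiftMajorVersion 1 (bits 31-24), SwiftMinorVersion 2 (bits 23-16),
// SwiftABIVersion 1 (bits 15-8), and an i8 GC flag of 0x50 (low byte).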
4600
4601 // "Objective-C Class Properties" is recently added for Objective-C. We
4602 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
4603 // flag of value 0, so we can correclty downgrade this flag when trying to
4604 // link an ObjC bitcode without this module flag with an ObjC bitcode with
4605 // this module flag.
4606 if (HasObjCFlag && !HasClassProperties) {
4607 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
4608 (uint32_t)0);
4609 Changed = true;
4610 }
4611
4612 if (HasSwiftVersionFlag) {
4613 M.addModuleFlag(Module::Error, "Swift ABI Version",
4614 SwiftABIVersion);
4615 M.addModuleFlag(Module::Error, "Swift Major Version",
4616 ConstantInt::get(Int8Ty, SwiftMajorVersion));
4617 M.addModuleFlag(Module::Error, "Swift Minor Version",
4618 ConstantInt::get(Int8Ty, SwiftMinorVersion));
4619 Changed = true;
4620 }
4621
4622 return Changed;
4623}
4624
4625void llvm::UpgradeSectionAttributes(Module &M) {
4626 auto TrimSpaces = [](StringRef Section) -> std::string {
4627 SmallVector<StringRef, 5> Components;
4628 Section.split(Components, ',');
4629
4630 SmallString<32> Buffer;
4631 raw_svector_ostream OS(Buffer);
4632
4633 for (auto Component : Components)
4634 OS << ',' << Component.trim();
4635
4636 return std::string(OS.str().substr(1));
4637 };
4638
4639 for (auto &GV : M.globals()) {
4640 if (!GV.hasSection())
4641 continue;
4642
4643 StringRef Section = GV.getSection();
4644
4645 if (!Section.startswith("__DATA, __objc_catlist"))
4646 continue;
4647
4648 // __DATA, __objc_catlist, regular, no_dead_strip
4649 // __DATA,__objc_catlist,regular,no_dead_strip
4650 GV.setSection(TrimSpaces(Section));
4651 }
4652}
4653
4654namespace {
4655// Prior to LLVM 10.0, the strictfp attribute could be used on individual
4656// callsites within a function that did not also have the strictfp attribute.
4657// Since 10.0, if strict FP semantics are needed within a function, the
4658// function must have the strictfp attribute and all calls within the function
4659// must also have the strictfp attribute. This latter restriction is
4660// necessary to prevent unwanted libcall simplification when a function is
4661// being cloned (such as for inlining).
4662//
4663// The "dangling" strictfp attribute usage was only used to prevent constant
4664// folding and other libcall simplification. The nobuiltin attribute on the
4665// callsite has the same effect.
4666struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
4667 StrictFPUpgradeVisitor() = default;
4668
4669 void visitCallBase(CallBase &Call) {
4670 if (!Call.isStrictFP())
4671 return;
4672 if (isa<ConstrainedFPIntrinsic>(&Call))
4673 return;
4674 // If we get here, the caller doesn't have the strictfp attribute
4675 // but this callsite does. Replace the strictfp attribute with nobuiltin.
4676 Call.removeFnAttr(Attribute::StrictFP);
4677 Call.addFnAttr(Attribute::NoBuiltin);
4678 }
4679};
4680} // namespace
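// Illustrative sketch (hypothetical attribute group): in a caller without
// the strictfp attribute, a callsite carrying #0 = { strictfp } is rewritten
// so that it carries nobuiltin instead, preserving the
// no-libcall-simplification effect.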
4681
4682void llvm::UpgradeFunctionAttributes(Function &F) {
4683 // If a function definition doesn't have the strictfp attribute,
4684 // convert any callsite strictfp attributes to nobuiltin.
4685 if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
4686 StrictFPUpgradeVisitor SFPV;
4687 SFPV.visit(F);
4688 }
4689
4690 // Remove all incompatible attributes from the function.
4691 F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
4692 for (auto &Arg : F.args())
4693 Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
4694}
4695
4696static bool isOldLoopArgument(Metadata *MD) {
4697 auto *T = dyn_cast_or_null<MDTuple>(MD);
4698 if (!T)
4699 return false;
4700 if (T->getNumOperands() < 1)
4701 return false;
4702 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
4703 if (!S)
4704 return false;
4705 return S->getString().startswith("llvm.vectorizer.");
4706}
4707
4708static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
4709 StringRef OldPrefix = "llvm.vectorizer.";
4710 assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
4711
4712 if (OldTag == "llvm.vectorizer.unroll")
4713 return MDString::get(C, "llvm.loop.interleave.count");
4714
4715 return MDString::get(
4716 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
4717 .str());
4718}
4719
4720static Metadata *upgradeLoopArgument(Metadata *MD) {
4721 auto *T = dyn_cast_or_null<MDTuple>(MD);
4722 if (!T)
4723 return MD;
4724 if (T->getNumOperands() < 1)
4725 return MD;
4726 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
4727 if (!OldTag)
4728 return MD;
4729 if (!OldTag->getString().startswith("llvm.vectorizer."))
4730 return MD;
4731
4732 // This has an old tag. Upgrade it.
4733 SmallVector<Metadata *, 8> Ops;
4734 Ops.reserve(T->getNumOperands());
4735 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
4736 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
4737 Ops.push_back(T->getOperand(I));
4738
4739 return MDTuple::get(T->getContext(), Ops);
4740}
4741
4742MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
4743 auto *T = dyn_cast<MDTuple>(&N);
4744 if (!T)
4745 return &N;
4746
4747 if (none_of(T->operands(), isOldLoopArgument))
4748 return &N;
4749
4750 SmallVector<Metadata *, 8> Ops;
4751 Ops.reserve(T->getNumOperands());
4752 for (Metadata *MD : T->operands())
4753 Ops.push_back(upgradeLoopArgument(MD));
4754
4755 return MDTuple::get(T->getContext(), Ops);
4756}
4757
4758std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
4759 Triple T(TT);
4760 // For AMDGPU we upgrade older DataLayouts to include the default globals
4761 // address space of 1.
4762 if (T.isAMDGPU() && !DL.contains("-G") && !DL.startswith("G")) {
4763 return DL.empty() ? std::string("G1") : (DL + "-G1").str();
4764 }
4765
4766 std::string Res = DL.str();
4767 if (!T.isX86())
4768 return Res;
4769
4770 // If the datalayout matches the expected format, add pointer size address
4771 // spaces to the datalayout.
4772 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
4773 if (!DL.contains(AddrSpaces)) {
4774 SmallVector<StringRef, 4> Groups;
4775 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
4776 if (R.match(DL, &Groups))
4777 Res = (Groups[1] + AddrSpaces + Groups[3]).str();
4778 }
4779
4780 // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
4781 // Raising the alignment is safe because Clang did not produce f80 values in
4782 // the MSVC environment before this upgrade was added.
4783 if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
4784 StringRef Ref = Res;
4785 auto I = Ref.find("-f80:32-");
4786 if (I != StringRef::npos)
4787 Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
4788 }
4789
4790 return Res;
4791}
4792
4793void llvm::UpgradeAttributes(AttrBuilder &B) {
4794 StringRef FramePointer;
4795 Attribute A = B.getAttribute("no-frame-pointer-elim");
4796 if (A.isValid()) {
4797 // The value can be "true" or "false".
4798 FramePointer = A.getValueAsString() == "true" ? "all" : "none";
4799 B.removeAttribute("no-frame-pointer-elim");
4800 }
4801 if (B.contains("no-frame-pointer-elim-non-leaf")) {
4802 // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
4803 if (FramePointer != "all")
4804 FramePointer = "non-leaf";
4805 B.removeAttribute("no-frame-pointer-elim-non-leaf");
4806 }
4807 if (!FramePointer.empty())
4808 B.addAttribute("frame-pointer", FramePointer);
4809
4810 A = B.getAttribute("null-pointer-is-valid");
4811 if (A.isValid()) {
4812 // The value can be "true" or "false".
4813 bool NullPointerIsValid = A.getValueAsString() == "true";
4814 B.removeAttribute("null-pointer-is-valid");
4815 if (NullPointerIsValid)
4816 B.addAttribute(Attribute::NullPointerIsValid);
4817 }
4818}
4819
4820void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
4821
4822 // clang.arc.attachedcall bundles are now required to have an operand.
4823 // If they don't, it's okay to drop them entirely: when there is an operand,
4824 // the "attachedcall" is meaningful and required, but without an operand,
4825 // it's just a marker NOP. Dropping it merely prevents an optimization.
4826 erase_if(Bundles, [&](OperandBundleDef &OBD) {
4827 return OBD.getTag() == "clang.arc.attachedcall" &&
4828 OBD.inputs().empty();
4829 });
4830}