Bug Summary

File: llvm/lib/IR/AutoUpgrade.cpp
Warning: line 3906, column 5
Value stored to 'NewCall' is never read
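
The checker involved here is deadcode.DeadStores. A minimal sketch of the pattern it flags, with hypothetical names (this is not the actual code at line 3906, which lies beyond the excerpt below):

static int compute() { return 42; }

void example() {
  int NewCall = compute(); // flagged: the stored value is never read
  // Typical fixes: use the value (e.g. return NewCall;) or drop the
  // assignment and call compute() purely for its side effects.
}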

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AutoUpgrade.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/IR -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/IR -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/IR/AutoUpgrade.cpp
1//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the auto-upgrade helper functions.
10// This is where deprecated IR intrinsics and other IR features are updated to
11// current specifications.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/IR/AutoUpgrade.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/IR/Constants.h"
18#include "llvm/IR/DIBuilder.h"
19#include "llvm/IR/DebugInfo.h"
20#include "llvm/IR/DiagnosticInfo.h"
21#include "llvm/IR/Function.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/InstVisitor.h"
24#include "llvm/IR/Instruction.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/IR/IntrinsicsAArch64.h"
28#include "llvm/IR/IntrinsicsARM.h"
29#include "llvm/IR/IntrinsicsX86.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/Module.h"
32#include "llvm/IR/Verifier.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/Regex.h"
35#include <cstring>
36using namespace llvm;
37
38static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
39
40// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
41// changed their type from v4f32 to v2i64.
42static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
43 Function *&NewFn) {
44 // Check whether this is an old version of the function, which received
45 // v4f32 arguments.
46 Type *Arg0Type = F->getFunctionType()->getParamType(0);
47 if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
48 return false;
49
50 // Yes, it's old, replace it with new version.
51 rename(F);
52 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
53 return true;
54}
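
// Illustrative only, not part of AutoUpgrade.cpp: the two declarations this
// helper distinguishes, written out as LLVM IR:
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)  ; legacy
//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)      ; current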
55
56// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
57// arguments have changed their type from i32 to i8.
58static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
59 Function *&NewFn) {
60 // Check that the last argument is an i32.
61 Type *LastArgType = F->getFunctionType()->getParamType(
62 F->getFunctionType()->getNumParams() - 1);
63 if (!LastArgType->isIntegerTy(32))
64 return false;
65
66 // Move this function aside and map down.
67 rename(F);
68 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
69 return true;
70}
71
72// Upgrade the declaration of fp compare intrinsics that change return type
73// from scalar to vXi1 mask.
74static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
75 Function *&NewFn) {
76 // Check if the return type is a vector.
77 if (F->getReturnType()->isVectorTy())
78 return false;
79
80 rename(F);
81 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
82 return true;
83}
84
85static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
 86 // All of the intrinsic matches below should be marked with the LLVM
 87 // version that started autoupgrading them. At some point in the future we
 88 // would like to use this information to remove upgrade code for some older
 89 // intrinsics. It is currently undecided how we will determine that future
 90 // point.
91 if (Name == "addcarryx.u32" || // Added in 8.0
92 Name == "addcarryx.u64" || // Added in 8.0
93 Name == "addcarry.u32" || // Added in 8.0
94 Name == "addcarry.u64" || // Added in 8.0
95 Name == "subborrow.u32" || // Added in 8.0
96 Name == "subborrow.u64" || // Added in 8.0
97 Name.startswith("sse2.padds.") || // Added in 8.0
98 Name.startswith("sse2.psubs.") || // Added in 8.0
99 Name.startswith("sse2.paddus.") || // Added in 8.0
100 Name.startswith("sse2.psubus.") || // Added in 8.0
101 Name.startswith("avx2.padds.") || // Added in 8.0
102 Name.startswith("avx2.psubs.") || // Added in 8.0
103 Name.startswith("avx2.paddus.") || // Added in 8.0
104 Name.startswith("avx2.psubus.") || // Added in 8.0
105 Name.startswith("avx512.padds.") || // Added in 8.0
106 Name.startswith("avx512.psubs.") || // Added in 8.0
107 Name.startswith("avx512.mask.padds.") || // Added in 8.0
108 Name.startswith("avx512.mask.psubs.") || // Added in 8.0
109 Name.startswith("avx512.mask.paddus.") || // Added in 8.0
110 Name.startswith("avx512.mask.psubus.") || // Added in 8.0
 111 Name == "ssse3.pabs.b.128" || // Added in 6.0
 112 Name == "ssse3.pabs.w.128" || // Added in 6.0
 113 Name == "ssse3.pabs.d.128" || // Added in 6.0
114 Name.startswith("fma4.vfmadd.s") || // Added in 7.0
115 Name.startswith("fma.vfmadd.") || // Added in 7.0
116 Name.startswith("fma.vfmsub.") || // Added in 7.0
117 Name.startswith("fma.vfmsubadd.") || // Added in 7.0
118 Name.startswith("fma.vfnmadd.") || // Added in 7.0
119 Name.startswith("fma.vfnmsub.") || // Added in 7.0
120 Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
121 Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
122 Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
123 Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
124 Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
125 Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
126 Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
127 Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
128 Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
129 Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
130 Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
131 Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
132 Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
 133 Name.startswith("avx512.kunpck") || // Added in 6.0
134 Name.startswith("avx2.pabs.") || // Added in 6.0
135 Name.startswith("avx512.mask.pabs.") || // Added in 6.0
136 Name.startswith("avx512.broadcastm") || // Added in 6.0
137 Name == "sse.sqrt.ss" || // Added in 7.0
138 Name == "sse2.sqrt.sd" || // Added in 7.0
139 Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
140 Name.startswith("avx.sqrt.p") || // Added in 7.0
141 Name.startswith("sse2.sqrt.p") || // Added in 7.0
142 Name.startswith("sse.sqrt.p") || // Added in 7.0
143 Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
144 Name.startswith("sse2.pcmpeq.") || // Added in 3.1
145 Name.startswith("sse2.pcmpgt.") || // Added in 3.1
146 Name.startswith("avx2.pcmpeq.") || // Added in 3.1
147 Name.startswith("avx2.pcmpgt.") || // Added in 3.1
148 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
149 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
150 Name.startswith("avx.vperm2f128.") || // Added in 6.0
151 Name == "avx2.vperm2i128" || // Added in 6.0
152 Name == "sse.add.ss" || // Added in 4.0
153 Name == "sse2.add.sd" || // Added in 4.0
154 Name == "sse.sub.ss" || // Added in 4.0
155 Name == "sse2.sub.sd" || // Added in 4.0
156 Name == "sse.mul.ss" || // Added in 4.0
157 Name == "sse2.mul.sd" || // Added in 4.0
158 Name == "sse.div.ss" || // Added in 4.0
159 Name == "sse2.div.sd" || // Added in 4.0
160 Name == "sse41.pmaxsb" || // Added in 3.9
161 Name == "sse2.pmaxs.w" || // Added in 3.9
162 Name == "sse41.pmaxsd" || // Added in 3.9
163 Name == "sse2.pmaxu.b" || // Added in 3.9
164 Name == "sse41.pmaxuw" || // Added in 3.9
165 Name == "sse41.pmaxud" || // Added in 3.9
166 Name == "sse41.pminsb" || // Added in 3.9
167 Name == "sse2.pmins.w" || // Added in 3.9
168 Name == "sse41.pminsd" || // Added in 3.9
169 Name == "sse2.pminu.b" || // Added in 3.9
170 Name == "sse41.pminuw" || // Added in 3.9
171 Name == "sse41.pminud" || // Added in 3.9
172 Name == "avx512.kand.w" || // Added in 7.0
173 Name == "avx512.kandn.w" || // Added in 7.0
174 Name == "avx512.knot.w" || // Added in 7.0
175 Name == "avx512.kor.w" || // Added in 7.0
176 Name == "avx512.kxor.w" || // Added in 7.0
177 Name == "avx512.kxnor.w" || // Added in 7.0
178 Name == "avx512.kortestc.w" || // Added in 7.0
179 Name == "avx512.kortestz.w" || // Added in 7.0
180 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
181 Name.startswith("avx2.pmax") || // Added in 3.9
182 Name.startswith("avx2.pmin") || // Added in 3.9
183 Name.startswith("avx512.mask.pmax") || // Added in 4.0
184 Name.startswith("avx512.mask.pmin") || // Added in 4.0
185 Name.startswith("avx2.vbroadcast") || // Added in 3.8
186 Name.startswith("avx2.pbroadcast") || // Added in 3.8
187 Name.startswith("avx.vpermil.") || // Added in 3.1
188 Name.startswith("sse2.pshuf") || // Added in 3.9
189 Name.startswith("avx512.pbroadcast") || // Added in 3.9
190 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
191 Name.startswith("avx512.mask.movddup") || // Added in 3.9
192 Name.startswith("avx512.mask.movshdup") || // Added in 3.9
193 Name.startswith("avx512.mask.movsldup") || // Added in 3.9
194 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
195 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
196 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
197 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
198 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
199 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
200 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
201 Name.startswith("avx512.mask.punpckl") || // Added in 3.9
202 Name.startswith("avx512.mask.punpckh") || // Added in 3.9
203 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
204 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
205 Name.startswith("avx512.mask.pand.") || // Added in 3.9
206 Name.startswith("avx512.mask.pandn.") || // Added in 3.9
207 Name.startswith("avx512.mask.por.") || // Added in 3.9
208 Name.startswith("avx512.mask.pxor.") || // Added in 3.9
209 Name.startswith("avx512.mask.and.") || // Added in 3.9
210 Name.startswith("avx512.mask.andn.") || // Added in 3.9
211 Name.startswith("avx512.mask.or.") || // Added in 3.9
212 Name.startswith("avx512.mask.xor.") || // Added in 3.9
213 Name.startswith("avx512.mask.padd.") || // Added in 4.0
214 Name.startswith("avx512.mask.psub.") || // Added in 4.0
215 Name.startswith("avx512.mask.pmull.") || // Added in 4.0
216 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
217 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
218 Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
219 Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
220 Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
221 Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
222 Name == "avx512.mask.vcvtph2ps.128" || // Added in 11.0
223 Name == "avx512.mask.vcvtph2ps.256" || // Added in 11.0
224 Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
225 Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
226 Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
227 Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
228 Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
229 Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
230 Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
231 Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
232 Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
233 Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
234 Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
235 Name == "avx512.cvtusi2sd" || // Added in 7.0
236 Name.startswith("avx512.mask.permvar.") || // Added in 7.0
237 Name == "sse2.pmulu.dq" || // Added in 7.0
238 Name == "sse41.pmuldq" || // Added in 7.0
239 Name == "avx2.pmulu.dq" || // Added in 7.0
240 Name == "avx2.pmul.dq" || // Added in 7.0
241 Name == "avx512.pmulu.dq.512" || // Added in 7.0
242 Name == "avx512.pmul.dq.512" || // Added in 7.0
243 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
244 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
245 Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
246 Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
247 Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
248 Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
249 Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
250 Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
251 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
252 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
253 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
254 Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
255 Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
256 Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
257 Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
258 Name.startswith("avx512.cmp.p") || // Added in 12.0
259 Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
260 Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
261 Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
262 Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
263 Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
264 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
265 Name.startswith("avx512.mask.psll.d") || // Added in 4.0
266 Name.startswith("avx512.mask.psll.q") || // Added in 4.0
267 Name.startswith("avx512.mask.psll.w") || // Added in 4.0
268 Name.startswith("avx512.mask.psra.d") || // Added in 4.0
269 Name.startswith("avx512.mask.psra.q") || // Added in 4.0
270 Name.startswith("avx512.mask.psra.w") || // Added in 4.0
271 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
272 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
273 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
274 Name.startswith("avx512.mask.pslli") || // Added in 4.0
275 Name.startswith("avx512.mask.psrai") || // Added in 4.0
276 Name.startswith("avx512.mask.psrli") || // Added in 4.0
277 Name.startswith("avx512.mask.psllv") || // Added in 4.0
278 Name.startswith("avx512.mask.psrav") || // Added in 4.0
279 Name.startswith("avx512.mask.psrlv") || // Added in 4.0
280 Name.startswith("sse41.pmovsx") || // Added in 3.8
281 Name.startswith("sse41.pmovzx") || // Added in 3.9
282 Name.startswith("avx2.pmovsx") || // Added in 3.9
283 Name.startswith("avx2.pmovzx") || // Added in 3.9
284 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
285 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
286 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
287 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
288 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
289 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
290 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
291 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
292 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
293 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
294 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
295 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
296 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
297 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
298 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
299 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
300 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
301 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
302 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
303 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
304 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
305 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
306 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
307 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
308 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
309 Name.startswith("avx512.vpshld.") || // Added in 8.0
310 Name.startswith("avx512.vpshrd.") || // Added in 8.0
311 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
312 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
313 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
314 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
315 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
316 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
317 Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
318 Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
319 Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
320 Name.startswith("avx512.mask.conflict.") || // Added in 9.0
321 Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
322 Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
323 Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
324 Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
325 Name == "sse.cvtsi2ss" || // Added in 7.0
326 Name == "sse.cvtsi642ss" || // Added in 7.0
327 Name == "sse2.cvtsi2sd" || // Added in 7.0
328 Name == "sse2.cvtsi642sd" || // Added in 7.0
329 Name == "sse2.cvtss2sd" || // Added in 7.0
330 Name == "sse2.cvtdq2pd" || // Added in 3.9
331 Name == "sse2.cvtdq2ps" || // Added in 7.0
332 Name == "sse2.cvtps2pd" || // Added in 3.9
333 Name == "avx.cvtdq2.pd.256" || // Added in 3.9
334 Name == "avx.cvtdq2.ps.256" || // Added in 7.0
335 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
336 Name.startswith("vcvtph2ps.") || // Added in 11.0
337 Name.startswith("avx.vinsertf128.") || // Added in 3.7
338 Name == "avx2.vinserti128" || // Added in 3.7
339 Name.startswith("avx512.mask.insert") || // Added in 4.0
340 Name.startswith("avx.vextractf128.") || // Added in 3.7
341 Name == "avx2.vextracti128" || // Added in 3.7
342 Name.startswith("avx512.mask.vextract") || // Added in 4.0
343 Name.startswith("sse4a.movnt.") || // Added in 3.9
344 Name.startswith("avx.movnt.") || // Added in 3.2
345 Name.startswith("avx512.storent.") || // Added in 3.9
346 Name == "sse41.movntdqa" || // Added in 5.0
347 Name == "avx2.movntdqa" || // Added in 5.0
348 Name == "avx512.movntdqa" || // Added in 5.0
349 Name == "sse2.storel.dq" || // Added in 3.9
350 Name.startswith("sse.storeu.") || // Added in 3.9
351 Name.startswith("sse2.storeu.") || // Added in 3.9
352 Name.startswith("avx.storeu.") || // Added in 3.9
353 Name.startswith("avx512.mask.storeu.") || // Added in 3.9
354 Name.startswith("avx512.mask.store.p") || // Added in 3.9
355 Name.startswith("avx512.mask.store.b.") || // Added in 3.9
356 Name.startswith("avx512.mask.store.w.") || // Added in 3.9
357 Name.startswith("avx512.mask.store.d.") || // Added in 3.9
358 Name.startswith("avx512.mask.store.q.") || // Added in 3.9
359 Name == "avx512.mask.store.ss" || // Added in 7.0
360 Name.startswith("avx512.mask.loadu.") || // Added in 3.9
361 Name.startswith("avx512.mask.load.") || // Added in 3.9
362 Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
363 Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
364 Name.startswith("avx512.mask.expand.b") || // Added in 9.0
365 Name.startswith("avx512.mask.expand.w") || // Added in 9.0
366 Name.startswith("avx512.mask.expand.d") || // Added in 9.0
367 Name.startswith("avx512.mask.expand.q") || // Added in 9.0
368 Name.startswith("avx512.mask.expand.p") || // Added in 9.0
369 Name.startswith("avx512.mask.compress.b") || // Added in 9.0
370 Name.startswith("avx512.mask.compress.w") || // Added in 9.0
371 Name.startswith("avx512.mask.compress.d") || // Added in 9.0
372 Name.startswith("avx512.mask.compress.q") || // Added in 9.0
373 Name.startswith("avx512.mask.compress.p") || // Added in 9.0
374 Name == "sse42.crc32.64.8" || // Added in 3.4
375 Name.startswith("avx.vbroadcast.s") || // Added in 3.5
376 Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
377 Name.startswith("avx512.mask.palignr.") || // Added in 3.9
378 Name.startswith("avx512.mask.valign.") || // Added in 4.0
379 Name.startswith("sse2.psll.dq") || // Added in 3.7
380 Name.startswith("sse2.psrl.dq") || // Added in 3.7
381 Name.startswith("avx2.psll.dq") || // Added in 3.7
382 Name.startswith("avx2.psrl.dq") || // Added in 3.7
383 Name.startswith("avx512.psll.dq") || // Added in 3.9
384 Name.startswith("avx512.psrl.dq") || // Added in 3.9
385 Name == "sse41.pblendw" || // Added in 3.7
386 Name.startswith("sse41.blendp") || // Added in 3.7
387 Name.startswith("avx.blend.p") || // Added in 3.7
388 Name == "avx2.pblendw" || // Added in 3.7
389 Name.startswith("avx2.pblendd.") || // Added in 3.7
390 Name.startswith("avx.vbroadcastf128") || // Added in 4.0
391 Name == "avx2.vbroadcasti128" || // Added in 3.7
392 Name.startswith("avx512.mask.broadcastf32x4.") || // Added in 6.0
393 Name.startswith("avx512.mask.broadcastf64x2.") || // Added in 6.0
394 Name.startswith("avx512.mask.broadcastf32x8.") || // Added in 6.0
395 Name.startswith("avx512.mask.broadcastf64x4.") || // Added in 6.0
396 Name.startswith("avx512.mask.broadcasti32x4.") || // Added in 6.0
397 Name.startswith("avx512.mask.broadcasti64x2.") || // Added in 6.0
398 Name.startswith("avx512.mask.broadcasti32x8.") || // Added in 6.0
399 Name.startswith("avx512.mask.broadcasti64x4.") || // Added in 6.0
400 Name == "xop.vpcmov" || // Added in 3.8
401 Name == "xop.vpcmov.256" || // Added in 5.0
402 Name.startswith("avx512.mask.move.s") || // Added in 4.0
403 Name.startswith("avx512.cvtmask2") || // Added in 5.0
404 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
405 Name.startswith("xop.vprot") || // Added in 8.0
406 Name.startswith("avx512.prol") || // Added in 8.0
407 Name.startswith("avx512.pror") || // Added in 8.0
408 Name.startswith("avx512.mask.prorv.") || // Added in 8.0
409 Name.startswith("avx512.mask.pror.") || // Added in 8.0
410 Name.startswith("avx512.mask.prolv.") || // Added in 8.0
411 Name.startswith("avx512.mask.prol.") || // Added in 8.0
 412 Name.startswith("avx512.ptestm") || // Added in 6.0
 413 Name.startswith("avx512.ptestnm") || // Added in 6.0
414 Name.startswith("avx512.mask.pavg")) // Added in 6.0
415 return true;
416
417 return false;
418}
419
420static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
421 Function *&NewFn) {
422 // Only handle intrinsics that start with "x86.".
423 if (!Name.startswith("x86."))
424 return false;
425 // Remove "x86." prefix.
426 Name = Name.substr(4);
427
428 if (ShouldUpgradeX86Intrinsic(F, Name)) {
429 NewFn = nullptr;
430 return true;
431 }
432
433 if (Name == "rdtscp") { // Added in 8.0
434 // If this intrinsic has 0 operands, it's the new version.
435 if (F->getFunctionType()->getNumParams() == 0)
436 return false;
437
438 rename(F);
439 NewFn = Intrinsic::getDeclaration(F->getParent(),
440 Intrinsic::x86_rdtscp);
441 return true;
442 }
443
444 // SSE4.1 ptest functions may have an old signature.
445 if (Name.startswith("sse41.ptest")) { // Added in 3.2
446 if (Name.substr(11) == "c")
447 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
448 if (Name.substr(11) == "z")
449 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
450 if (Name.substr(11) == "nzc")
451 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
452 }
453 // Several blend and other instructions with masks used the wrong number of
454 // bits.
455 if (Name == "sse41.insertps") // Added in 3.6
456 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
457 NewFn);
458 if (Name == "sse41.dppd") // Added in 3.6
459 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
460 NewFn);
461 if (Name == "sse41.dpps") // Added in 3.6
462 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
463 NewFn);
464 if (Name == "sse41.mpsadbw") // Added in 3.6
465 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
466 NewFn);
467 if (Name == "avx.dp.ps.256") // Added in 3.6
468 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
469 NewFn);
470 if (Name == "avx2.mpsadbw") // Added in 3.6
471 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
472 NewFn);
473 if (Name == "avx512.mask.cmp.pd.128") // Added in 7.0
474 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_128,
475 NewFn);
476 if (Name == "avx512.mask.cmp.pd.256") // Added in 7.0
477 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_256,
478 NewFn);
479 if (Name == "avx512.mask.cmp.pd.512") // Added in 7.0
480 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_512,
481 NewFn);
482 if (Name == "avx512.mask.cmp.ps.128") // Added in 7.0
483 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_128,
484 NewFn);
485 if (Name == "avx512.mask.cmp.ps.256") // Added in 7.0
486 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_256,
487 NewFn);
488 if (Name == "avx512.mask.cmp.ps.512") // Added in 7.0
489 return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_512,
490 NewFn);
491
492 // frcz.ss/sd may need to have an argument dropped. Added in 3.2
493 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
494 rename(F);
495 NewFn = Intrinsic::getDeclaration(F->getParent(),
496 Intrinsic::x86_xop_vfrcz_ss);
497 return true;
498 }
499 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
500 rename(F);
501 NewFn = Intrinsic::getDeclaration(F->getParent(),
502 Intrinsic::x86_xop_vfrcz_sd);
503 return true;
504 }
505 // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
506 if (Name.startswith("xop.vpermil2")) { // Added in 3.9
507 auto Idx = F->getFunctionType()->getParamType(2);
508 if (Idx->isFPOrFPVectorTy()) {
509 rename(F);
510 unsigned IdxSize = Idx->getPrimitiveSizeInBits();
511 unsigned EltSize = Idx->getScalarSizeInBits();
512 Intrinsic::ID Permil2ID;
513 if (EltSize == 64 && IdxSize == 128)
514 Permil2ID = Intrinsic::x86_xop_vpermil2pd;
515 else if (EltSize == 32 && IdxSize == 128)
516 Permil2ID = Intrinsic::x86_xop_vpermil2ps;
517 else if (EltSize == 64 && IdxSize == 256)
518 Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
519 else
520 Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
521 NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
522 return true;
523 }
524 }
525
526 if (Name == "seh.recoverfp") {
527 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
528 return true;
529 }
530
531 return false;
532}
533
534static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
 535 assert(F && "Illegal to upgrade a non-existent Function.");
536
537 // Quickly eliminate it, if it's not a candidate.
538 StringRef Name = F->getName();
539 if (Name.size() <= 8 || !Name.startswith("llvm."))
540 return false;
541 Name = Name.substr(5); // Strip off "llvm."
542
543 switch (Name[0]) {
544 default: break;
545 case 'a': {
546 if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
547 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
548 F->arg_begin()->getType());
549 return true;
550 }
551 if (Name.startswith("aarch64.neon.frintn")) {
552 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::roundeven,
553 F->arg_begin()->getType());
554 return true;
555 }
556 if (Name.startswith("aarch64.neon.rbit")) {
557 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
558 F->arg_begin()->getType());
559 return true;
560 }
561 if (Name.startswith("arm.neon.vclz")) {
562 Type* args[2] = {
563 F->arg_begin()->getType(),
564 Type::getInt1Ty(F->getContext())
565 };
566 // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
567 // the end of the name. Change name from llvm.arm.neon.vclz.* to
568 // llvm.ctlz.*
569 FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
570 NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
571 "llvm.ctlz." + Name.substr(14), F->getParent());
572 return true;
573 }
574 if (Name.startswith("arm.neon.vcnt")) {
575 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
576 F->arg_begin()->getType());
577 return true;
578 }
579 static const Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
580 if (vldRegex.match(Name)) {
581 auto fArgs = F->getFunctionType()->params();
582 SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
583 // Can't use Intrinsic::getDeclaration here as the return types might
584 // then only be structurally equal.
585 FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
586 StringRef Suffix =
587 F->getContext().supportsTypedPointers() ? "p0i8" : "p0";
588 NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
589 "llvm." + Name + "." + Suffix, F->getParent());
590 return true;
591 }
592 static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
593 if (vstRegex.match(Name)) {
594 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
595 Intrinsic::arm_neon_vst2,
596 Intrinsic::arm_neon_vst3,
597 Intrinsic::arm_neon_vst4};
598
599 static const Intrinsic::ID StoreLaneInts[] = {
600 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
601 Intrinsic::arm_neon_vst4lane
602 };
603
604 auto fArgs = F->getFunctionType()->params();
605 Type *Tys[] = {fArgs[0], fArgs[1]};
606 if (!Name.contains("lane"))
607 NewFn = Intrinsic::getDeclaration(F->getParent(),
608 StoreInts[fArgs.size() - 3], Tys);
609 else
610 NewFn = Intrinsic::getDeclaration(F->getParent(),
611 StoreLaneInts[fArgs.size() - 5], Tys);
612 return true;
613 }
614 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
615 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
616 return true;
617 }
618 if (Name.startswith("arm.neon.vqadds.")) {
619 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
620 F->arg_begin()->getType());
621 return true;
622 }
623 if (Name.startswith("arm.neon.vqaddu.")) {
624 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
625 F->arg_begin()->getType());
626 return true;
627 }
628 if (Name.startswith("arm.neon.vqsubs.")) {
629 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
630 F->arg_begin()->getType());
631 return true;
632 }
633 if (Name.startswith("arm.neon.vqsubu.")) {
634 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
635 F->arg_begin()->getType());
636 return true;
637 }
638 if (Name.startswith("aarch64.neon.addp")) {
639 if (F->arg_size() != 2)
640 break; // Invalid IR.
641 VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
642 if (Ty && Ty->getElementType()->isFloatingPointTy()) {
643 NewFn = Intrinsic::getDeclaration(F->getParent(),
644 Intrinsic::aarch64_neon_faddp, Ty);
645 return true;
646 }
647 }
648
 649 // Changed in 12.0: bfdot accepts v4bf16 and v8bf16 instead of v8i8 and
 650 // v16i8, respectively.
651 if ((Name.startswith("arm.neon.bfdot.") ||
652 Name.startswith("aarch64.neon.bfdot.")) &&
653 Name.endswith("i8")) {
654 Intrinsic::ID IID =
655 StringSwitch<Intrinsic::ID>(Name)
656 .Cases("arm.neon.bfdot.v2f32.v8i8",
657 "arm.neon.bfdot.v4f32.v16i8",
658 Intrinsic::arm_neon_bfdot)
659 .Cases("aarch64.neon.bfdot.v2f32.v8i8",
660 "aarch64.neon.bfdot.v4f32.v16i8",
661 Intrinsic::aarch64_neon_bfdot)
662 .Default(Intrinsic::not_intrinsic);
663 if (IID == Intrinsic::not_intrinsic)
664 break;
665
666 size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
 667 assert((OperandWidth == 64 || OperandWidth == 128) &&
 668        "Unexpected operand width");
669 LLVMContext &Ctx = F->getParent()->getContext();
670 std::array<Type *, 2> Tys {{
671 F->getReturnType(),
672 FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)
673 }};
674 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
675 return true;
676 }
677
678 // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic anymore
679 // and accept v8bf16 instead of v16i8
680 if ((Name.startswith("arm.neon.bfm") ||
681 Name.startswith("aarch64.neon.bfm")) &&
682 Name.endswith(".v4f32.v16i8")) {
683 Intrinsic::ID IID =
684 StringSwitch<Intrinsic::ID>(Name)
685 .Case("arm.neon.bfmmla.v4f32.v16i8",
686 Intrinsic::arm_neon_bfmmla)
687 .Case("arm.neon.bfmlalb.v4f32.v16i8",
688 Intrinsic::arm_neon_bfmlalb)
689 .Case("arm.neon.bfmlalt.v4f32.v16i8",
690 Intrinsic::arm_neon_bfmlalt)
691 .Case("aarch64.neon.bfmmla.v4f32.v16i8",
692 Intrinsic::aarch64_neon_bfmmla)
693 .Case("aarch64.neon.bfmlalb.v4f32.v16i8",
694 Intrinsic::aarch64_neon_bfmlalb)
695 .Case("aarch64.neon.bfmlalt.v4f32.v16i8",
696 Intrinsic::aarch64_neon_bfmlalt)
697 .Default(Intrinsic::not_intrinsic);
698 if (IID == Intrinsic::not_intrinsic)
699 break;
700
701 std::array<Type *, 0> Tys;
702 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
703 return true;
704 }
705
706 if (Name == "arm.mve.vctp64" &&
707 cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
708 // A vctp64 returning a v4i1 is converted to return a v2i1. Rename the
709 // function and deal with it below in UpgradeIntrinsicCall.
710 rename(F);
711 return true;
712 }
 713 // These too are changed to accept a v2i1 instead of the old v4i1.
714 if (Name == "arm.mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
715 Name == "arm.mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
716 Name == "arm.mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
717 Name == "arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
718 Name == "arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
719 Name == "arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
720 Name == "arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
721 Name == "arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
722 Name == "arm.cde.vcx1q.predicated.v2i64.v4i1" ||
723 Name == "arm.cde.vcx1qa.predicated.v2i64.v4i1" ||
724 Name == "arm.cde.vcx2q.predicated.v2i64.v4i1" ||
725 Name == "arm.cde.vcx2qa.predicated.v2i64.v4i1" ||
726 Name == "arm.cde.vcx3q.predicated.v2i64.v4i1" ||
727 Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1")
728 return true;
729
730 if (Name == "amdgcn.alignbit") {
731 // Target specific intrinsic became redundant
732 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
733 {F->getReturnType()});
734 return true;
735 }
736
737 break;
738 }
739
740 case 'c': {
741 if (Name.startswith("ctlz.") && F->arg_size() == 1) {
742 rename(F);
743 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
744 F->arg_begin()->getType());
745 return true;
746 }
747 if (Name.startswith("cttz.") && F->arg_size() == 1) {
748 rename(F);
749 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
750 F->arg_begin()->getType());
751 return true;
752 }
753 break;
754 }
755 case 'd': {
756 if (Name == "dbg.value" && F->arg_size() == 4) {
757 rename(F);
758 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
759 return true;
760 }
761 break;
762 }
763 case 'e': {
764 SmallVector<StringRef, 2> Groups;
765 static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+");
766 if (R.match(Name, &Groups)) {
767 Intrinsic::ID ID;
768 ID = StringSwitch<Intrinsic::ID>(Groups[1])
769 .Case("add", Intrinsic::vector_reduce_add)
770 .Case("mul", Intrinsic::vector_reduce_mul)
771 .Case("and", Intrinsic::vector_reduce_and)
772 .Case("or", Intrinsic::vector_reduce_or)
773 .Case("xor", Intrinsic::vector_reduce_xor)
774 .Case("smax", Intrinsic::vector_reduce_smax)
775 .Case("smin", Intrinsic::vector_reduce_smin)
776 .Case("umax", Intrinsic::vector_reduce_umax)
777 .Case("umin", Intrinsic::vector_reduce_umin)
778 .Case("fmax", Intrinsic::vector_reduce_fmax)
779 .Case("fmin", Intrinsic::vector_reduce_fmin)
780 .Default(Intrinsic::not_intrinsic);
781 if (ID != Intrinsic::not_intrinsic) {
782 rename(F);
783 auto Args = F->getFunctionType()->params();
784 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, {Args[0]});
785 return true;
786 }
787 }
788 static const Regex R2(
789 "^experimental.vector.reduce.v2.([a-z]+)\\.[fi][0-9]+");
790 Groups.clear();
791 if (R2.match(Name, &Groups)) {
792 Intrinsic::ID ID = Intrinsic::not_intrinsic;
793 if (Groups[1] == "fadd")
794 ID = Intrinsic::vector_reduce_fadd;
795 if (Groups[1] == "fmul")
796 ID = Intrinsic::vector_reduce_fmul;
797 if (ID != Intrinsic::not_intrinsic) {
798 rename(F);
799 auto Args = F->getFunctionType()->params();
800 Type *Tys[] = {Args[1]};
801 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
802 return true;
803 }
804 }
805 break;
806 }
807 case 'i':
808 case 'l': {
809 bool IsLifetimeStart = Name.startswith("lifetime.start");
810 if (IsLifetimeStart || Name.startswith("invariant.start")) {
811 Intrinsic::ID ID = IsLifetimeStart ?
812 Intrinsic::lifetime_start : Intrinsic::invariant_start;
813 auto Args = F->getFunctionType()->params();
814 Type* ObjectPtr[1] = {Args[1]};
815 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
816 rename(F);
817 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
818 return true;
819 }
820 }
821
822 bool IsLifetimeEnd = Name.startswith("lifetime.end");
823 if (IsLifetimeEnd || Name.startswith("invariant.end")) {
824 Intrinsic::ID ID = IsLifetimeEnd ?
825 Intrinsic::lifetime_end : Intrinsic::invariant_end;
826
827 auto Args = F->getFunctionType()->params();
828 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
829 if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
830 rename(F);
831 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
832 return true;
833 }
834 }
835 if (Name.startswith("invariant.group.barrier")) {
836 // Rename invariant.group.barrier to launder.invariant.group
837 auto Args = F->getFunctionType()->params();
838 Type* ObjectPtr[1] = {Args[0]};
839 rename(F);
840 NewFn = Intrinsic::getDeclaration(F->getParent(),
841 Intrinsic::launder_invariant_group, ObjectPtr);
842 return true;
843
844 }
845
846 break;
847 }
848 case 'm': {
849 if (Name.startswith("masked.load.")) {
850 Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
851 if (F->getName() !=
852 Intrinsic::getName(Intrinsic::masked_load, Tys, F->getParent())) {
853 rename(F);
854 NewFn = Intrinsic::getDeclaration(F->getParent(),
855 Intrinsic::masked_load,
856 Tys);
857 return true;
858 }
859 }
860 if (Name.startswith("masked.store.")) {
861 auto Args = F->getFunctionType()->params();
862 Type *Tys[] = { Args[0], Args[1] };
863 if (F->getName() !=
864 Intrinsic::getName(Intrinsic::masked_store, Tys, F->getParent())) {
865 rename(F);
866 NewFn = Intrinsic::getDeclaration(F->getParent(),
867 Intrinsic::masked_store,
868 Tys);
869 return true;
870 }
871 }
872 // Renaming gather/scatter intrinsics with no address space overloading
873 // to the new overload which includes an address space
874 if (Name.startswith("masked.gather.")) {
875 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
876 if (F->getName() !=
877 Intrinsic::getName(Intrinsic::masked_gather, Tys, F->getParent())) {
878 rename(F);
879 NewFn = Intrinsic::getDeclaration(F->getParent(),
880 Intrinsic::masked_gather, Tys);
881 return true;
882 }
883 }
884 if (Name.startswith("masked.scatter.")) {
885 auto Args = F->getFunctionType()->params();
886 Type *Tys[] = {Args[0], Args[1]};
887 if (F->getName() !=
888 Intrinsic::getName(Intrinsic::masked_scatter, Tys, F->getParent())) {
889 rename(F);
890 NewFn = Intrinsic::getDeclaration(F->getParent(),
891 Intrinsic::masked_scatter, Tys);
892 return true;
893 }
894 }
 895 // Update the memory intrinsics (memcpy/memmove/memset) that carried an
 896 // explicit alignment parameter to the forms that embed the alignment as
 897 // an attribute of the pointer args; see the sketch after these cases.
898 if (Name.startswith("memcpy.") && F->arg_size() == 5) {
899 rename(F);
900 // Get the types of dest, src, and len
901 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
902 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
903 ParamTypes);
904 return true;
905 }
906 if (Name.startswith("memmove.") && F->arg_size() == 5) {
907 rename(F);
908 // Get the types of dest, src, and len
909 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
910 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
911 ParamTypes);
912 return true;
913 }
914 if (Name.startswith("memset.") && F->arg_size() == 5) {
915 rename(F);
916 // Get the types of dest, and len
917 const auto *FT = F->getFunctionType();
918 Type *ParamTypes[2] = {
919 FT->getParamType(0), // Dest
920 FT->getParamType(2) // len
921 };
922 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
923 ParamTypes);
924 return true;
925 }
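
// Illustrative only: at the call-rewriting stage the five-operand form
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 4, i1 false)
// becomes the four-operand form carrying alignment as attributes:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 %n, i1 false)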
926 break;
927 }
928 case 'n': {
929 if (Name.startswith("nvvm.")) {
930 Name = Name.substr(5);
931
932 // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
933 Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
934 .Cases("brev32", "brev64", Intrinsic::bitreverse)
935 .Case("clz.i", Intrinsic::ctlz)
936 .Case("popc.i", Intrinsic::ctpop)
937 .Default(Intrinsic::not_intrinsic);
938 if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
939 NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
940 {F->getReturnType()});
941 return true;
942 }
943
944 // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
945 // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
946 //
947 // TODO: We could add lohi.i2d.
948 bool Expand = StringSwitch<bool>(Name)
949 .Cases("abs.i", "abs.ll", true)
950 .Cases("clz.ll", "popc.ll", "h2f", true)
951 .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
952 .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
953 .StartsWith("atomic.load.add.f32.p", true)
954 .StartsWith("atomic.load.add.f64.p", true)
955 .Default(false);
956 if (Expand) {
957 NewFn = nullptr;
958 return true;
959 }
960 }
961 break;
962 }
963 case 'o':
964 // We only need to change the name to match the mangling including the
965 // address space.
966 if (Name.startswith("objectsize.")) {
967 Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
968 if (F->arg_size() == 2 || F->arg_size() == 3 ||
969 F->getName() !=
970 Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
971 rename(F);
972 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
973 Tys);
974 return true;
975 }
976 }
977 break;
978
979 case 'p':
980 if (Name == "prefetch") {
981 // Handle address space overloading.
982 Type *Tys[] = {F->arg_begin()->getType()};
983 if (F->getName() !=
984 Intrinsic::getName(Intrinsic::prefetch, Tys, F->getParent())) {
985 rename(F);
986 NewFn =
987 Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
988 return true;
989 }
990 } else if (Name.startswith("ptr.annotation.") && F->arg_size() == 4) {
991 rename(F);
992 NewFn = Intrinsic::getDeclaration(F->getParent(),
993 Intrinsic::ptr_annotation,
994 F->arg_begin()->getType());
995 return true;
996 }
997 break;
998
999 case 's':
1000 if (Name == "stackprotectorcheck") {
1001 NewFn = nullptr;
1002 return true;
1003 }
1004 break;
1005
1006 case 'v': {
1007 if (Name == "var.annotation" && F->arg_size() == 4) {
1008 rename(F);
1009 NewFn = Intrinsic::getDeclaration(F->getParent(),
1010 Intrinsic::var_annotation);
1011 return true;
1012 }
1013 break;
1014 }
1015
1016 case 'x':
1017 if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
1018 return true;
1019 }
1020 // Remangle our intrinsic since we upgrade the mangling
1021 auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
1022 if (Result != None) {
1023 NewFn = Result.getValue();
1024 return true;
1025 }
1026
1027 // This may not belong here. This function is effectively being overloaded
1028 // to both detect an intrinsic which needs upgrading, and to provide the
1029 // upgraded form of the intrinsic. We should perhaps have two separate
1030 // functions for this.
1031 return false;
1032}
1033
1034bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
1035 NewFn = nullptr;
1036 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
 1037 assert(F != NewFn && "Intrinsic function upgraded to the same function");
1038
1039 // Upgrade intrinsic attributes. This does not change the function.
1040 if (NewFn)
1041 F = NewFn;
1042 if (Intrinsic::ID id = F->getIntrinsicID())
1043 F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
1044 return Upgraded;
1045}
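
A hedged sketch of how a module consumer typically drives this entry point, assuming only the public declarations in llvm/IR/AutoUpgrade.h (upgradeModuleIntrinsics is a hypothetical name, simplified from the pattern LLVM's bitcode reader uses):

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static void upgradeModuleIntrinsics(Module &M) {
  // UpgradeCallsToIntrinsic() consults UpgradeIntrinsicFunction() and, when
  // an upgrade applies, rewrites every call site and erases the stale
  // declaration, so iterate with an early-increment range to stay valid.
  for (Function &F : make_early_inc_range(M))
    UpgradeCallsToIntrinsic(&F);
}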
1046
1047GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
1048 if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
1049 GV->getName() == "llvm.global_dtors")) ||
1050 !GV->hasInitializer())
1051 return nullptr;
1052 ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
1053 if (!ATy)
1054 return nullptr;
1055 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
1056 if (!STy || STy->getNumElements() != 2)
1057 return nullptr;
1058
1059 LLVMContext &C = GV->getContext();
1060 IRBuilder<> IRB(C);
1061 auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
1062 IRB.getInt8PtrTy());
1063 Constant *Init = GV->getInitializer();
1064 unsigned N = Init->getNumOperands();
1065 std::vector<Constant *> NewCtors(N);
1066 for (unsigned i = 0; i != N; ++i) {
1067 auto Ctor = cast<Constant>(Init->getOperand(i));
1068 NewCtors[i] = ConstantStruct::get(
1069 EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
1070 Constant::getNullValue(IRB.getInt8PtrTy()));
1071 }
1072 Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
1073
1074 return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
1075 NewInit, GV->getName());
1076}
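
// Illustrative only: the rewrite above widens each llvm.global_ctors /
// llvm.global_dtors element from the legacy two-field type { i32, void ()* }
// to the current three-field form { i32, void ()*, i8* }, filling the new
// associated-data slot with null.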
1077
1078// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
1079// to byte shuffles.
1080static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
1081 Value *Op, unsigned Shift) {
1082 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1083 unsigned NumElts = ResultTy->getNumElements() * 8;
1084
1085 // Bitcast from a 64-bit element type to a byte element type.
1086 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1087 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1088
1089 // We'll be shuffling in zeroes.
1090 Value *Res = Constant::getNullValue(VecTy);
1091
1092 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1093 // we'll just return the zero vector.
1094 if (Shift < 16) {
1095 int Idxs[64];
1096 // 256/512-bit version is split into 2/4 16-byte lanes.
1097 for (unsigned l = 0; l != NumElts; l += 16)
1098 for (unsigned i = 0; i != 16; ++i) {
1099 unsigned Idx = NumElts + i - Shift;
1100 if (Idx < NumElts)
1101 Idx -= NumElts - 16; // end of lane, switch operand.
1102 Idxs[l + i] = Idx + l;
1103 }
1104
1105 Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
1106 }
1107
1108 // Bitcast back to a 64-bit element type.
1109 return Builder.CreateBitCast(Res, ResultTy, "cast");
1110}
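
A standalone sketch of the index math above, runnable outside LLVM (a hypothetical driver, not part of this file); the PSRLDQ helper below mirrors it with Idx = i + Shift:

#include <cstdio>

int main() {
  const unsigned NumElts = 32, Shift = 4; // 256-bit vector, 4-byte shift
  int Idxs[64];
  for (unsigned l = 0; l != NumElts; l += 16)
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = NumElts + i - Shift;
      if (Idx < NumElts)
        Idx -= NumElts - 16; // end of lane, switch to the zero operand
      Idxs[l + i] = Idx + l;
    }
  // Indices < NumElts select zero bytes; indices >= NumElts select Op bytes,
  // so each 16-byte lane gets Shift leading zeroes followed by Op's bytes.
  for (unsigned i = 0; i != NumElts; ++i)
    std::printf("%d ", Idxs[i]);
  std::printf("\n");
  return 0;
}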
1111
1112// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
1113// to byte shuffles.
1114static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
1115 unsigned Shift) {
1116 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1117 unsigned NumElts = ResultTy->getNumElements() * 8;
1118
1119 // Bitcast from a 64-bit element type to a byte element type.
1120 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1121 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1122
1123 // We'll be shuffling in zeroes.
1124 Value *Res = Constant::getNullValue(VecTy);
1125
1126 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1127 // we'll just return the zero vector.
1128 if (Shift < 16) {
1129 int Idxs[64];
1130 // 256/512-bit version is split into 2/4 16-byte lanes.
1131 for (unsigned l = 0; l != NumElts; l += 16)
1132 for (unsigned i = 0; i != 16; ++i) {
1133 unsigned Idx = i + Shift;
1134 if (Idx >= 16)
1135 Idx += NumElts - 16; // end of lane, switch operand.
1136 Idxs[l + i] = Idx + l;
1137 }
1138
1139 Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
1140 }
1141
1142 // Bitcast back to a 64-bit element type.
1143 return Builder.CreateBitCast(Res, ResultTy, "cast");
1144}
1145
1146static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
1147 unsigned NumElts) {
 1148 assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
1149 llvm::VectorType *MaskTy = FixedVectorType::get(
1150 Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
1151 Mask = Builder.CreateBitCast(Mask, MaskTy);
1152
1153 // If we have less than 8 elements (1, 2 or 4), then the starting mask was an
1154 // i8 and we need to extract down to the right number of elements.
1155 if (NumElts <= 4) {
1156 int Indices[4];
1157 for (unsigned i = 0; i != NumElts; ++i)
1158 Indices[i] = i;
1159 Mask = Builder.CreateShuffleVector(
1160 Mask, Mask, makeArrayRef(Indices, NumElts), "extract");
1161 }
1162
1163 return Mask;
1164}
1165
1166static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
1167 Value *Op0, Value *Op1) {
1168 // If the mask is all ones just emit the first operation.
1169 if (const auto *C = dyn_cast<Constant>(Mask))
1170 if (C->isAllOnesValue())
1171 return Op0;
1172
1173 Mask = getX86MaskVec(Builder, Mask,
1174 cast<FixedVectorType>(Op0->getType())->getNumElements());
1175 return Builder.CreateSelect(Mask, Op0, Op1);
1176}
1177
1178static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
1179 Value *Op0, Value *Op1) {
1180 // If the mask is all ones just emit the first operation.
1181 if (const auto *C = dyn_cast<Constant>(Mask))
1182 if (C->isAllOnesValue())
1183 return Op0;
1184
1185 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
1186 Mask->getType()->getIntegerBitWidth());
1187 Mask = Builder.CreateBitCast(Mask, MaskTy);
1188 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
1189 return Builder.CreateSelect(Mask, Op0, Op1);
1190}
1191
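
A standalone sketch (hypothetical, outside LLVM) of the semantics the two select helpers above assemble in IR: bit i of the integer mask steers element i of a per-element select between the computed result and the passthru:

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned NumElts = 8;
  const uint8_t Mask = 0xB1; // 0b10110001
  int Op0[NumElts] = {10, 11, 12, 13, 14, 15, 16, 17}; // computed result
  int Op1[NumElts] = {0, 1, 2, 3, 4, 5, 6, 7};         // passthru
  int Res[NumElts];
  for (unsigned i = 0; i != NumElts; ++i)
    Res[i] = ((Mask >> i) & 1) ? Op0[i] : Op1[i];
  for (unsigned i = 0; i != NumElts; ++i)
    std::printf("%d ", Res[i]); // prints: 10 1 2 3 14 15 6 17
  return 0;
}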
 1192 // Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
 1193 // PALIGNR handles large immediates by shifting, while VALIGN masks the
 1194 // immediate, so we need to handle both. VALIGN also has no 128-bit lanes.
1195static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
1196 Value *Op1, Value *Shift,
1197 Value *Passthru, Value *Mask,
1198 bool IsVALIGN) {
1199 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
1200
1201 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
 1202 assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
 1203 assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
 1204 assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
1205
1206 // Mask the immediate for VALIGN.
1207 if (IsVALIGN)
1208 ShiftVal &= (NumElts - 1);
1209
1210 // If palignr is shifting the pair of vectors more than the size of two
1211 // lanes, emit zero.
1212 if (ShiftVal >= 32)
1213 return llvm::Constant::getNullValue(Op0->getType());
1214
1215 // If palignr is shifting the pair of input vectors more than one lane,
1216 // but less than two lanes, convert to shifting in zeroes.
1217 if (ShiftVal > 16) {
1218 ShiftVal -= 16;
1219 Op1 = Op0;
1220 Op0 = llvm::Constant::getNullValue(Op0->getType());
1221 }
1222
1223 int Indices[64];
1224 // 256-bit palignr operates on 128-bit lanes, so we need to handle that here.
1225 for (unsigned l = 0; l < NumElts; l += 16) {
1226 for (unsigned i = 0; i != 16; ++i) {
1227 unsigned Idx = ShiftVal + i;
1228 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1229 Idx += NumElts - 16; // End of lane, switch operand.
1230 Indices[l + i] = Idx + l;
1231 }
1232 }
1233
1234 Value *Align = Builder.CreateShuffleVector(Op1, Op0,
1235 makeArrayRef(Indices, NumElts),
1236 "palignr");
1237
1238 return EmitX86Select(Builder, Mask, Align, Passthru);
1239}
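// A worked example of the index computation above, assuming a 128-bit PALIGNR
// (NumElts == 16, IsVALIGN == false) with ShiftVal == 4: Indices becomes
// {4..15, 16..19}. In the shufflevector, indices 0..15 select from Op1 and
// indices 16..31 from Op0, so the result is the 16 bytes starting 4 bytes into
// the 32-byte value with Op1 in the low half and Op0 in the high half --
// exactly the palignr semantics.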
1240
1241static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
1242 bool ZeroMask, bool IndexForm) {
1243 Type *Ty = CI.getType();
1244 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1245 unsigned EltWidth = Ty->getScalarSizeInBits();
1246 bool IsFloat = Ty->isFPOrFPVectorTy();
1247 Intrinsic::ID IID;
1248 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1249 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1250 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1251 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1252 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1253 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1254 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1255 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1256 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1257 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1258 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1259 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1260 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1261 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1262 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1263 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1264 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1265 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1266 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1267 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1268 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1269 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1270 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1271 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1272 else if (VecWidth == 128 && EltWidth == 16)
1273 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1274 else if (VecWidth == 256 && EltWidth == 16)
1275 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1276 else if (VecWidth == 512 && EltWidth == 16)
1277 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1278 else if (VecWidth == 128 && EltWidth == 8)
1279 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1280 else if (VecWidth == 256 && EltWidth == 8)
1281 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1282 else if (VecWidth == 512 && EltWidth == 8)
1283 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1284 else
1285 llvm_unreachable("Unexpected intrinsic");
1286
1287 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1288 CI.getArgOperand(2) };
1289
1290 // If this isn't the index form, we need to swap operands 0 and 1.
1291 if (!IndexForm)
1292 std::swap(Args[0], Args[1]);
1293
1294 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1295 Args);
1296 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1297 : Builder.CreateBitCast(CI.getArgOperand(1),
1298 Ty);
1299 return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1300}
1301
1302static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallInst &CI,
1303 Intrinsic::ID IID) {
1304 Type *Ty = CI.getType();
1305 Value *Op0 = CI.getOperand(0);
1306 Value *Op1 = CI.getOperand(1);
1307 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1308 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1309
1310 if (CI.arg_size() == 4) { // For masked intrinsics.
1311 Value *VecSrc = CI.getOperand(2);
1312 Value *Mask = CI.getOperand(3);
1313 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1314 }
1315 return Res;
1316}
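// A sketch of the rewrite for one concrete case (assuming the old
// llvm.x86.sse41.pmaxsd on <4 x i32>): the call becomes the generic
//
//   %r = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %a, <4 x i32> %b)
//
// and for the 4-argument AVX-512 masked forms the passthru/mask pair is
// re-expressed as a vector select via EmitX86Select.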
1317
1318static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
1319 bool IsRotateRight) {
1320 Type *Ty = CI.getType();
1321 Value *Src = CI.getArgOperand(0);
1322 Value *Amt = CI.getArgOperand(1);
1323
1324 // The amount may be a scalar immediate, in which case we create a splat vector.
1325 // Funnel shift amounts are treated modulo the bit width, and the types are all
1326 // power-of-2, so we only care about the lowest log2(BitWidth) bits anyway.
1327 if (Amt->getType() != Ty) {
1328 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1329 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1330 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1331 }
1332
1333 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1334 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1335 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1336
1337 if (CI.arg_size() == 4) { // For masked intrinsics.
1338 Value *VecSrc = CI.getOperand(2);
1339 Value *Mask = CI.getOperand(3);
1340 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1341 }
1342 return Res;
1343}
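// A rotate is a funnel shift with both inputs equal, so the old rotate
// intrinsics lower to (sketch, assuming a <4 x i32> rotate-left):
//
//   %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %src, <4 x i32> %src, <4 x i32> %amt)
//
// Scalar rotate amounts are first splatted to match the vector type.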
1344
1345static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
1346 bool IsSigned) {
1347 Type *Ty = CI.getType();
1348 Value *LHS = CI.getArgOperand(0);
1349 Value *RHS = CI.getArgOperand(1);
1350
1351 CmpInst::Predicate Pred;
1352 switch (Imm) {
1353 case 0x0:
1354 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1355 break;
1356 case 0x1:
1357 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1358 break;
1359 case 0x2:
1360 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1361 break;
1362 case 0x3:
1363 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1364 break;
1365 case 0x4:
1366 Pred = ICmpInst::ICMP_EQ;
1367 break;
1368 case 0x5:
1369 Pred = ICmpInst::ICMP_NE;
1370 break;
1371 case 0x6:
1372 return Constant::getNullValue(Ty); // FALSE
1373 case 0x7:
1374 return Constant::getAllOnesValue(Ty); // TRUE
1375 default:
1376 llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
1377 }
1378
1379 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1380 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1381 return Ext;
1382}
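// Example mapping, assuming the signed byte form with Imm == 0x2
// (greater-than):
//
//   %c = icmp sgt <16 x i8> %lhs, %rhs
//   %r = sext <16 x i1> %c to <16 x i8>
//
// The sign extension reproduces XOP's all-ones/all-zeros lane results, and
// Imm values 0x6/0x7 fold directly to constant false/true vectors above.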
1383
1384static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
1385 bool IsShiftRight, bool ZeroMask) {
1386 Type *Ty = CI.getType();
1387 Value *Op0 = CI.getArgOperand(0);
1388 Value *Op1 = CI.getArgOperand(1);
1389 Value *Amt = CI.getArgOperand(2);
1390
1391 if (IsShiftRight)
1392 std::swap(Op0, Op1);
1393
1394 // The amount may be a scalar immediate, in which case we create a splat vector.
1395 // Funnel shift amounts are treated modulo the bit width, and the types are all
1396 // power-of-2, so we only care about the lowest log2(BitWidth) bits anyway.
1397 if (Amt->getType() != Ty) {
1398 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1399 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1400 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1401 }
1402
1403 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1404 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1405 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1406
1407 unsigned NumArgs = CI.arg_size();
1408 if (NumArgs >= 4) { // For masked intrinsics.
1409 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1410 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1411 CI.getArgOperand(0);
1412 Value *Mask = CI.getOperand(NumArgs - 1);
1413 Res = EmitX86Select(Builder, Mask, Res, VecSrc);
1414 }
1415 return Res;
1416}
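// Note on the swap above: llvm.fshl/llvm.fshr take the operand that supplies
// the high bits first, so after std::swap the right-shift form ends up as
// (sketch, hypothetical <8 x i32> operands):
//
//   %r = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %op1, <8 x i32> %op0, <8 x i32> %amt)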
1417
1418static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
1419 Value *Ptr, Value *Data, Value *Mask,
1420 bool Aligned) {
1421 // Cast the pointer to the right type.
1422 Ptr = Builder.CreateBitCast(Ptr,
1423 llvm::PointerType::getUnqual(Data->getType()));
1424 const Align Alignment =
1425 Aligned
1426 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedSize() / 8)
1427 : Align(1);
1428
1429 // If the mask is all ones just emit a regular store.
1430 if (const auto *C = dyn_cast<Constant>(Mask))
1431 if (C->isAllOnesValue())
1432 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1433
1434 // Convert the mask from an integer type to a vector of i1.
1435 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1436 Mask = getX86MaskVec(Builder, Mask, NumElts);
1437 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1438}
1439
1440static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
1441 Value *Ptr, Value *Passthru, Value *Mask,
1442 bool Aligned) {
1443 Type *ValTy = Passthru->getType();
1444 // Cast the pointer to the right type.
1445 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1446 const Align Alignment =
1447 Aligned
1448 ? Align(Passthru->getType()->getPrimitiveSizeInBits().getFixedSize() /
1449 8)
1450 : Align(1);
1451
1452 // If the mask is all ones just emit a regular load.
1453 if (const auto *C = dyn_cast<Constant>(Mask))
1454 if (C->isAllOnesValue())
1455 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1456
1457 // Convert the mask from an integer type to a vector of i1.
1458 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1459 Mask = getX86MaskVec(Builder, Mask, NumElts);
1460 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1461}
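// A sketch of the masked path above, assuming an <8 x float> unaligned load
// with an i8 mask:
//
//   %m = bitcast i8 %mask to <8 x i1>
//   %v = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* %ptr, i32 1, <8 x i1> %m, <8 x float> %passthru)
//
// A constant all-ones mask instead produces a plain load at the same
// alignment.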
1462
1463static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
1464 Type *Ty = CI.getType();
1465 Value *Op0 = CI.getArgOperand(0);
1466 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1467 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1468 if (CI.arg_size() == 3)
1469 Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1470 return Res;
1471}
1472
1473static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
1474 Type *Ty = CI.getType();
1475
1476 // Arguments have a vXi32 type so cast to vXi64.
1477 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1478 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1479
1480 if (IsSigned) {
1481 // Shift left then arithmetic shift right.
1482 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1483 LHS = Builder.CreateShl(LHS, ShiftAmt);
1484 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1485 RHS = Builder.CreateShl(RHS, ShiftAmt);
1486 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1487 } else {
1488 // Clear the upper bits.
1489 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1490 LHS = Builder.CreateAnd(LHS, Mask);
1491 RHS = Builder.CreateAnd(RHS, Mask);
1492 }
1493
1494 Value *Res = Builder.CreateMul(LHS, RHS);
1495
1496 if (CI.arg_size() == 4)
1497 Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1498
1499 return Res;
1500}
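// A worked example of the signed path, assuming one i64 lane whose low 32 bits
// hold -2 (lane value 0x00000000FFFFFFFE after the bitcast): shl 32 gives
// 0xFFFFFFFE00000000 and ashr 32 gives 0xFFFFFFFFFFFFFFFE, i.e. -2
// sign-extended to 64 bits, so the full-width multiply sees the correct signed
// 32-bit inputs.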
1501
1502 // Apply the mask to a vector of i1s and make sure the result is at least 8 bits wide.
1503static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1504 Value *Mask) {
1505 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1506 if (Mask) {
1507 const auto *C = dyn_cast<Constant>(Mask);
1508 if (!C || !C->isAllOnesValue())
1509 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1510 }
1511
1512 if (NumElts < 8) {
1513 int Indices[8];
1514 for (unsigned i = 0; i != NumElts; ++i)
1515 Indices[i] = i;
1516 for (unsigned i = NumElts; i != 8; ++i)
1517 Indices[i] = NumElts + i % NumElts;
1518 Vec = Builder.CreateShuffleVector(Vec,
1519 Constant::getNullValue(Vec->getType()),
1520 Indices);
1521 }
1522 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1523}
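// Padding example, assuming NumElts == 4: the shuffle indices are
// {0, 1, 2, 3, 4, 5, 6, 7}, and since the second shuffle operand is an
// all-zero vector, lanes 4..7 select zeros. The compare result is thus widened
// to <8 x i1> with clear upper lanes before the final bitcast to i8.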
1524
1525static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
1526 unsigned CC, bool Signed) {
1527 Value *Op0 = CI.getArgOperand(0);
1528 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1529
1530 Value *Cmp;
1531 if (CC == 3) {
1532 Cmp = Constant::getNullValue(
1533 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1534 } else if (CC == 7) {
1535 Cmp = Constant::getAllOnesValue(
1536 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1537 } else {
1538 ICmpInst::Predicate Pred;
1539 switch (CC) {
1540 default: llvm_unreachable("Unknown condition code");
1541 case 0: Pred = ICmpInst::ICMP_EQ; break;
1542 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1543 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1544 case 4: Pred = ICmpInst::ICMP_NE; break;
1545 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1546 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1547 }
1548 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1549 }
1550
1551 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1552
1553 return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
1554}
1555
1556// Replace a masked intrinsic with an older unmasked intrinsic.
1557static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
1558 Intrinsic::ID IID) {
1559 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1560 Value *Rep = Builder.CreateCall(Intrin,
1561 { CI.getArgOperand(0), CI.getArgOperand(1) });
1562 return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1563}
1564
1565static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
1566 Value* A = CI.getArgOperand(0);
1567 Value* B = CI.getArgOperand(1);
1568 Value* Src = CI.getArgOperand(2);
1569 Value* Mask = CI.getArgOperand(3);
1570
1571 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1572 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1573 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1574 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1575 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1576 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1577}
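// In effect, assuming the usual scalar-move operand order: lane 0 of the
// result is (Mask & 1) ? B[0] : Src[0], and the remaining lanes come from A
// via the final insertelement.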
1578
1579
1580static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
1581 Value* Op = CI.getArgOperand(0);
1582 Type* ReturnOp = CI.getType();
1583 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1584 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1585 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1586}
1587
1588// Replace intrinsic with unmasked version and a select.
1589static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1590 CallInst &CI, Value *&Rep) {
1591 Name = Name.substr(12); // Remove avx512.mask.
1592
1593 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1594 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1595 Intrinsic::ID IID;
1596 if (Name.startswith("max.p")) {
1597 if (VecWidth == 128 && EltWidth == 32)
1598 IID = Intrinsic::x86_sse_max_ps;
1599 else if (VecWidth == 128 && EltWidth == 64)
1600 IID = Intrinsic::x86_sse2_max_pd;
1601 else if (VecWidth == 256 && EltWidth == 32)
1602 IID = Intrinsic::x86_avx_max_ps_256;
1603 else if (VecWidth == 256 && EltWidth == 64)
1604 IID = Intrinsic::x86_avx_max_pd_256;
1605 else
1606 llvm_unreachable("Unexpected intrinsic");
1607 } else if (Name.startswith("min.p")) {
1608 if (VecWidth == 128 && EltWidth == 32)
1609 IID = Intrinsic::x86_sse_min_ps;
1610 else if (VecWidth == 128 && EltWidth == 64)
1611 IID = Intrinsic::x86_sse2_min_pd;
1612 else if (VecWidth == 256 && EltWidth == 32)
1613 IID = Intrinsic::x86_avx_min_ps_256;
1614 else if (VecWidth == 256 && EltWidth == 64)
1615 IID = Intrinsic::x86_avx_min_pd_256;
1616 else
1617 llvm_unreachable("Unexpected intrinsic");
1618 } else if (Name.startswith("pshuf.b.")) {
1619 if (VecWidth == 128)
1620 IID = Intrinsic::x86_ssse3_pshuf_b_128;
1621 else if (VecWidth == 256)
1622 IID = Intrinsic::x86_avx2_pshuf_b;
1623 else if (VecWidth == 512)
1624 IID = Intrinsic::x86_avx512_pshuf_b_512;
1625 else
1626 llvm_unreachable("Unexpected intrinsic");
1627 } else if (Name.startswith("pmul.hr.sw.")) {
1628 if (VecWidth == 128)
1629 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
1630 else if (VecWidth == 256)
1631 IID = Intrinsic::x86_avx2_pmul_hr_sw;
1632 else if (VecWidth == 512)
1633 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
1634 else
1635 llvm_unreachable("Unexpected intrinsic");
1636 } else if (Name.startswith("pmulh.w.")) {
1637 if (VecWidth == 128)
1638 IID = Intrinsic::x86_sse2_pmulh_w;
1639 else if (VecWidth == 256)
1640 IID = Intrinsic::x86_avx2_pmulh_w;
1641 else if (VecWidth == 512)
1642 IID = Intrinsic::x86_avx512_pmulh_w_512;
1643 else
1644 llvm_unreachable("Unexpected intrinsic");
1645 } else if (Name.startswith("pmulhu.w.")) {
1646 if (VecWidth == 128)
1647 IID = Intrinsic::x86_sse2_pmulhu_w;
1648 else if (VecWidth == 256)
1649 IID = Intrinsic::x86_avx2_pmulhu_w;
1650 else if (VecWidth == 512)
1651 IID = Intrinsic::x86_avx512_pmulhu_w_512;
1652 else
1653 llvm_unreachable("Unexpected intrinsic");
1654 } else if (Name.startswith("pmaddw.d.")) {
1655 if (VecWidth == 128)
1656 IID = Intrinsic::x86_sse2_pmadd_wd;
1657 else if (VecWidth == 256)
1658 IID = Intrinsic::x86_avx2_pmadd_wd;
1659 else if (VecWidth == 512)
1660 IID = Intrinsic::x86_avx512_pmaddw_d_512;
1661 else
1662 llvm_unreachable("Unexpected intrinsic");
1663 } else if (Name.startswith("pmaddubs.w.")) {
1664 if (VecWidth == 128)
1665 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
1666 else if (VecWidth == 256)
1667 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
1668 else if (VecWidth == 512)
1669 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
1670 else
1671 llvm_unreachable("Unexpected intrinsic");
1672 } else if (Name.startswith("packsswb.")) {
1673 if (VecWidth == 128)
1674 IID = Intrinsic::x86_sse2_packsswb_128;
1675 else if (VecWidth == 256)
1676 IID = Intrinsic::x86_avx2_packsswb;
1677 else if (VecWidth == 512)
1678 IID = Intrinsic::x86_avx512_packsswb_512;
1679 else
1680 llvm_unreachable("Unexpected intrinsic");
1681 } else if (Name.startswith("packssdw.")) {
1682 if (VecWidth == 128)
1683 IID = Intrinsic::x86_sse2_packssdw_128;
1684 else if (VecWidth == 256)
1685 IID = Intrinsic::x86_avx2_packssdw;
1686 else if (VecWidth == 512)
1687 IID = Intrinsic::x86_avx512_packssdw_512;
1688 else
1689 llvm_unreachable("Unexpected intrinsic");
1690 } else if (Name.startswith("packuswb.")) {
1691 if (VecWidth == 128)
1692 IID = Intrinsic::x86_sse2_packuswb_128;
1693 else if (VecWidth == 256)
1694 IID = Intrinsic::x86_avx2_packuswb;
1695 else if (VecWidth == 512)
1696 IID = Intrinsic::x86_avx512_packuswb_512;
1697 else
1698 llvm_unreachable("Unexpected intrinsic");
1699 } else if (Name.startswith("packusdw.")) {
1700 if (VecWidth == 128)
1701 IID = Intrinsic::x86_sse41_packusdw;
1702 else if (VecWidth == 256)
1703 IID = Intrinsic::x86_avx2_packusdw;
1704 else if (VecWidth == 512)
1705 IID = Intrinsic::x86_avx512_packusdw_512;
1706 else
1707 llvm_unreachable("Unexpected intrinsic");
1708 } else if (Name.startswith("vpermilvar.")) {
1709 if (VecWidth == 128 && EltWidth == 32)
1710 IID = Intrinsic::x86_avx_vpermilvar_ps;
1711 else if (VecWidth == 128 && EltWidth == 64)
1712 IID = Intrinsic::x86_avx_vpermilvar_pd;
1713 else if (VecWidth == 256 && EltWidth == 32)
1714 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
1715 else if (VecWidth == 256 && EltWidth == 64)
1716 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
1717 else if (VecWidth == 512 && EltWidth == 32)
1718 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
1719 else if (VecWidth == 512 && EltWidth == 64)
1720 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
1721 else
1722 llvm_unreachable("Unexpected intrinsic");
1723 } else if (Name == "cvtpd2dq.256") {
1724 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
1725 } else if (Name == "cvtpd2ps.256") {
1726 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
1727 } else if (Name == "cvttpd2dq.256") {
1728 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
1729 } else if (Name == "cvttps2dq.128") {
1730 IID = Intrinsic::x86_sse2_cvttps2dq;
1731 } else if (Name == "cvttps2dq.256") {
1732 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
1733 } else if (Name.startswith("permvar.")) {
1734 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
1735 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1736 IID = Intrinsic::x86_avx2_permps;
1737 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1738 IID = Intrinsic::x86_avx2_permd;
1739 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1740 IID = Intrinsic::x86_avx512_permvar_df_256;
1741 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1742 IID = Intrinsic::x86_avx512_permvar_di_256;
1743 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1744 IID = Intrinsic::x86_avx512_permvar_sf_512;
1745 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1746 IID = Intrinsic::x86_avx512_permvar_si_512;
1747 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1748 IID = Intrinsic::x86_avx512_permvar_df_512;
1749 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1750 IID = Intrinsic::x86_avx512_permvar_di_512;
1751 else if (VecWidth == 128 && EltWidth == 16)
1752 IID = Intrinsic::x86_avx512_permvar_hi_128;
1753 else if (VecWidth == 256 && EltWidth == 16)
1754 IID = Intrinsic::x86_avx512_permvar_hi_256;
1755 else if (VecWidth == 512 && EltWidth == 16)
1756 IID = Intrinsic::x86_avx512_permvar_hi_512;
1757 else if (VecWidth == 128 && EltWidth == 8)
1758 IID = Intrinsic::x86_avx512_permvar_qi_128;
1759 else if (VecWidth == 256 && EltWidth == 8)
1760 IID = Intrinsic::x86_avx512_permvar_qi_256;
1761 else if (VecWidth == 512 && EltWidth == 8)
1762 IID = Intrinsic::x86_avx512_permvar_qi_512;
1763 else
1764 llvm_unreachable("Unexpected intrinsic");
1765 } else if (Name.startswith("dbpsadbw.")) {
1766 if (VecWidth == 128)
1767 IID = Intrinsic::x86_avx512_dbpsadbw_128;
1768 else if (VecWidth == 256)
1769 IID = Intrinsic::x86_avx512_dbpsadbw_256;
1770 else if (VecWidth == 512)
1771 IID = Intrinsic::x86_avx512_dbpsadbw_512;
1772 else
1773 llvm_unreachable("Unexpected intrinsic");
1774 } else if (Name.startswith("pmultishift.qb.")) {
1775 if (VecWidth == 128)
1776 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
1777 else if (VecWidth == 256)
1778 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
1779 else if (VecWidth == 512)
1780 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
1781 else
1782 llvm_unreachable("Unexpected intrinsic");
1783 } else if (Name.startswith("conflict.")) {
1784 if (Name[9] == 'd' && VecWidth == 128)
1785 IID = Intrinsic::x86_avx512_conflict_d_128;
1786 else if (Name[9] == 'd' && VecWidth == 256)
1787 IID = Intrinsic::x86_avx512_conflict_d_256;
1788 else if (Name[9] == 'd' && VecWidth == 512)
1789 IID = Intrinsic::x86_avx512_conflict_d_512;
1790 else if (Name[9] == 'q' && VecWidth == 128)
1791 IID = Intrinsic::x86_avx512_conflict_q_128;
1792 else if (Name[9] == 'q' && VecWidth == 256)
1793 IID = Intrinsic::x86_avx512_conflict_q_256;
1794 else if (Name[9] == 'q' && VecWidth == 512)
1795 IID = Intrinsic::x86_avx512_conflict_q_512;
1796 else
1797 llvm_unreachable("Unexpected intrinsic");
1798 } else if (Name.startswith("pavg.")) {
1799 if (Name[5] == 'b' && VecWidth == 128)
1800 IID = Intrinsic::x86_sse2_pavg_b;
1801 else if (Name[5] == 'b' && VecWidth == 256)
1802 IID = Intrinsic::x86_avx2_pavg_b;
1803 else if (Name[5] == 'b' && VecWidth == 512)
1804 IID = Intrinsic::x86_avx512_pavg_b_512;
1805 else if (Name[5] == 'w' && VecWidth == 128)
1806 IID = Intrinsic::x86_sse2_pavg_w;
1807 else if (Name[5] == 'w' && VecWidth == 256)
1808 IID = Intrinsic::x86_avx2_pavg_w;
1809 else if (Name[5] == 'w' && VecWidth == 512)
1810 IID = Intrinsic::x86_avx512_pavg_w_512;
1811 else
1812 llvm_unreachable("Unexpected intrinsic");
1813 } else
1814 return false;
1815
1816 SmallVector<Value *, 4> Args(CI.args());
1817 Args.pop_back();
1818 Args.pop_back();
1819 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1820 Args);
1821 unsigned NumArgs = CI.arg_size();
1822 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
1823 CI.getArgOperand(NumArgs - 2));
1824 return true;
1825}
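// A sketch of the rewrite, assuming the masked 128-bit pshuf.b form: the
// trailing passthru/mask operands are popped, the unmasked intrinsic is
// called,
//
//   %v = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> %b)
//
// and the masking is re-expressed as a vector select over %v and the passthru.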
1826
1827 /// Upgrade the comment in a call to inline asm that represents an objc retain/release
1828/// marker.
1829void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
1830 size_t Pos;
1831 if (AsmStr->find("mov\tfp") == 0 &&
1832 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
1833 (Pos = AsmStr->find("# marker")) != std::string::npos) {
1834 AsmStr->replace(Pos, 1, ";");
1835 }
1836}
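// Example: an old marker string such as
//   "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue"
// has its '#' replaced with ';' (the comment syntax the integrated assembler
// expects); the rest of the string is left untouched.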
1837
1838static Value *UpgradeARMIntrinsicCall(StringRef Name, CallInst *CI, Function *F,
1839 IRBuilder<> &Builder) {
1840 if (Name == "mve.vctp64.old") {
1841 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
1842 // correct type.
1843 Value *VCTP = Builder.CreateCall(
1844 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
1845 CI->getArgOperand(0), CI->getName());
1846 Value *C1 = Builder.CreateCall(
1847 Intrinsic::getDeclaration(
1848 F->getParent(), Intrinsic::arm_mve_pred_v2i,
1849 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
1850 VCTP);
1851 return Builder.CreateCall(
1852 Intrinsic::getDeclaration(
1853 F->getParent(), Intrinsic::arm_mve_pred_i2v,
1854 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
1855 C1);
1856 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
1857 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
1858 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
1859 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
1860 Name == "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
1861 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
1862 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
1863 Name == "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
1864 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
1865 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
1866 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
1867 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
1868 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
1869 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
1870 std::vector<Type *> Tys;
1871 unsigned ID = CI->getIntrinsicID();
1872 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
1873 switch (ID) {
1874 case Intrinsic::arm_mve_mull_int_predicated:
1875 case Intrinsic::arm_mve_vqdmull_predicated:
1876 case Intrinsic::arm_mve_vldr_gather_base_predicated:
1877 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
1878 break;
1879 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
1880 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
1881 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
1882 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
1883 V2I1Ty};
1884 break;
1885 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
1886 Tys = {CI->getType(), CI->getOperand(0)->getType(),
1887 CI->getOperand(1)->getType(), V2I1Ty};
1888 break;
1889 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
1890 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
1891 CI->getOperand(2)->getType(), V2I1Ty};
1892 break;
1893 case Intrinsic::arm_cde_vcx1q_predicated:
1894 case Intrinsic::arm_cde_vcx1qa_predicated:
1895 case Intrinsic::arm_cde_vcx2q_predicated:
1896 case Intrinsic::arm_cde_vcx2qa_predicated:
1897 case Intrinsic::arm_cde_vcx3q_predicated:
1898 case Intrinsic::arm_cde_vcx3qa_predicated:
1899 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
1900 break;
1901 default:
1902 llvm_unreachable("Unhandled Intrinsic!");
1903 }
1904
1905 std::vector<Value *> Ops;
1906 for (Value *Op : CI->args()) {
1907 Type *Ty = Op->getType();
1908 if (Ty->getScalarSizeInBits() == 1) {
1909 Value *C1 = Builder.CreateCall(
1910 Intrinsic::getDeclaration(
1911 F->getParent(), Intrinsic::arm_mve_pred_v2i,
1912 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
1913 Op);
1914 Op = Builder.CreateCall(
1915 Intrinsic::getDeclaration(F->getParent(),
1916 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
1917 C1);
1918 }
1919 Ops.push_back(Op);
1920 }
1921
1922 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
1923 return Builder.CreateCall(Fn, Ops, CI->getName());
1924 }
1925 llvm_unreachable("Unknown function for ARM CallInst upgrade.");
1926}
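// The predicate casts above round-trip through an integer: arm.mve.pred.v2i
// packs an <n x i1> predicate into an i32 and arm.mve.pred.i2v expands it back
// at a different lane granularity (<4 x i1> vs <2 x i1>), which is how the old
// v4i1-based intrinsics are adapted to the v2i1 forms without new IR casts.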
1927
1928/// Upgrade a call to an old intrinsic. All argument and return casting must be
1929/// provided to seamlessly integrate with existing context.
1930void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
1931 Function *F = CI->getCalledFunction();
1932 LLVMContext &C = CI->getContext();
1933 IRBuilder<> Builder(C);
1934 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
1935
1936 assert(F && "Intrinsic call is not direct?");
1937
1938 if (!NewFn) {
1939 // Get the Function's name.
1940 StringRef Name = F->getName();
1941
1942 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
1943 Name = Name.substr(5);
1944
1945 bool IsX86 = Name.startswith("x86.");
1946 if (IsX86)
1947 Name = Name.substr(4);
1948 bool IsNVVM = Name.startswith("nvvm.");
1949 if (IsNVVM)
1950 Name = Name.substr(5);
1951 bool IsARM = Name.startswith("arm.");
1952 if (IsARM)
1953 Name = Name.substr(4);
1954
1955 if (IsX86 && Name.startswith("sse4a.movnt.")) {
1956 Module *M = F->getParent();
1957 SmallVector<Metadata *, 1> Elts;
1958 Elts.push_back(
1959 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
1960 MDNode *Node = MDNode::get(C, Elts);
1961
1962 Value *Arg0 = CI->getArgOperand(0);
1963 Value *Arg1 = CI->getArgOperand(1);
1964
1965 // Nontemporal (unaligned) store of the 0'th element of the float/double
1966 // vector.
1967 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
1968 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
1969 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
1970 Value *Extract =
1971 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
1972
1973 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
1974 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
1975
1976 // Remove intrinsic.
1977 CI->eraseFromParent();
1978 return;
1979 }
1980
1981 if (IsX86 && (Name.startswith("avx.movnt.") ||
1982 Name.startswith("avx512.storent."))) {
1983 Module *M = F->getParent();
1984 SmallVector<Metadata *, 1> Elts;
1985 Elts.push_back(
1986 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
1987 MDNode *Node = MDNode::get(C, Elts);
1988
1989 Value *Arg0 = CI->getArgOperand(0);
1990 Value *Arg1 = CI->getArgOperand(1);
1991
1992 // Convert the type of the pointer to a pointer to the stored type.
1993 Value *BC = Builder.CreateBitCast(Arg0,
1994 PointerType::getUnqual(Arg1->getType()),
1995 "cast");
1996 StoreInst *SI = Builder.CreateAlignedStore(
1997 Arg1, BC,
1998 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
1999 SI->setMetadata(M->getMDKindID("nontemporal"), Node);
2000
2001 // Remove intrinsic.
2002 CI->eraseFromParent();
2003 return;
2004 }
2005
2006 if (IsX86 && Name == "sse2.storel.dq") {
2007 Value *Arg0 = CI->getArgOperand(0);
2008 Value *Arg1 = CI->getArgOperand(1);
2009
2010 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2011 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2012 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2013 Value *BC = Builder.CreateBitCast(Arg0,
2014 PointerType::getUnqual(Elt->getType()),
2015 "cast");
2016 Builder.CreateAlignedStore(Elt, BC, Align(1));
2017
2018 // Remove intrinsic.
2019 CI->eraseFromParent();
2020 return;
2021 }
2022
2023 if (IsX86 && (Name.startswith("sse.storeu.") ||
2024 Name.startswith("sse2.storeu.") ||
2025 Name.startswith("avx.storeu."))) {
2026 Value *Arg0 = CI->getArgOperand(0);
2027 Value *Arg1 = CI->getArgOperand(1);
2028
2029 Arg0 = Builder.CreateBitCast(Arg0,
2030 PointerType::getUnqual(Arg1->getType()),
2031 "cast");
2032 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2033
2034 // Remove intrinsic.
2035 CI->eraseFromParent();
2036 return;
2037 }
2038
2039 if (IsX86 && Name == "avx512.mask.store.ss") {
2040 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2041 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2042 Mask, false);
2043
2044 // Remove intrinsic.
2045 CI->eraseFromParent();
2046 return;
2047 }
2048
2049 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
2050 // "avx512.mask.storeu." or "avx512.mask.store."
2051 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2052 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2053 CI->getArgOperand(2), Aligned);
2054
2055 // Remove intrinsic.
2056 CI->eraseFromParent();
2057 return;
2058 }
2059
2060 Value *Rep;
2061 // Upgrade packed integer vector compare intrinsics to compare instructions.
2062 if (IsX86 && (Name.startswith("sse2.pcmp") ||
2063 Name.startswith("avx2.pcmp"))) {
2064 // "sse2.pcmpeq.", "sse2.pcmpgt.", "avx2.pcmpeq.", or "avx2.pcmpgt."
2065 bool CmpEq = Name[9] == 'e';
2066 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2067 CI->getArgOperand(0), CI->getArgOperand(1));
2068 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2069 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
2070 Type *ExtTy = Type::getInt32Ty(C);
2071 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2072 ExtTy = Type::getInt64Ty(C);
2073 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2074 ExtTy->getPrimitiveSizeInBits();
2075 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2076 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2077 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2078 Name == "sse2.sqrt.sd")) {
2079 Value *Vec = CI->getArgOperand(0);
2080 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2081 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2082 Intrinsic::sqrt, Elt0->getType());
2083 Elt0 = Builder.CreateCall(Intr, Elt0);
2084 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2085 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
2086 Name.startswith("sse2.sqrt.p") ||
2087 Name.startswith("sse.sqrt.p"))) {
2088 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2089 Intrinsic::sqrt,
2090 CI->getType()),
2091 {CI->getArgOperand(0)});
2092 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
2093 if (CI->arg_size() == 4 &&
2094 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2095 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2096 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2097 : Intrinsic::x86_avx512_sqrt_pd_512;
2098
2099 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2100 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
2101 IID), Args);
2102 } else {
2103 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2104 Intrinsic::sqrt,
2105 CI->getType()),
2106 {CI->getArgOperand(0)});
2107 }
2108 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2109 CI->getArgOperand(1));
2110 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
2111 Name.startswith("avx512.ptestnm"))) {
2112 Value *Op0 = CI->getArgOperand(0);
2113 Value *Op1 = CI->getArgOperand(1);
2114 Value *Mask = CI->getArgOperand(2);
2115 Rep = Builder.CreateAnd(Op0, Op1);
2116 llvm::Type *Ty = Op0->getType();
2117 Value *Zero = llvm::Constant::getNullValue(Ty);
2118 ICmpInst::Predicate Pred =
2119 Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2120 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2121 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
2122 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
2123 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2124 ->getNumElements();
2125 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2126 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2127 CI->getArgOperand(1));
2128 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
2129 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2130 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2131 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2132 int Indices[64];
2133 for (unsigned i = 0; i != NumElts; ++i)
2134 Indices[i] = i;
2135
2136 // First extract half of each vector. This gives better codegen than
2137 // doing it in a single shuffle.
2138 LHS = Builder.CreateShuffleVector(LHS, LHS,
2139 makeArrayRef(Indices, NumElts / 2));
2140 RHS = Builder.CreateShuffleVector(RHS, RHS,
2141 makeArrayRef(Indices, NumElts / 2));
2142 // Concat the vectors.
2143 // NOTE: Operands have to be swapped to match intrinsic definition.
2144 Rep = Builder.CreateShuffleVector(RHS, LHS,
2145 makeArrayRef(Indices, NumElts));
2146 Rep = Builder.CreateBitCast(Rep, CI->getType());
2147 } else if (IsX86 && Name == "avx512.kand.w") {
2148 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2149 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2150 Rep = Builder.CreateAnd(LHS, RHS);
2151 Rep = Builder.CreateBitCast(Rep, CI->getType());
2152 } else if (IsX86 && Name == "avx512.kandn.w") {
2153 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2154 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2155 LHS = Builder.CreateNot(LHS);
2156 Rep = Builder.CreateAnd(LHS, RHS);
2157 Rep = Builder.CreateBitCast(Rep, CI->getType());
2158 } else if (IsX86 && Name == "avx512.kor.w") {
2159 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2160 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2161 Rep = Builder.CreateOr(LHS, RHS);
2162 Rep = Builder.CreateBitCast(Rep, CI->getType());
2163 } else if (IsX86 && Name == "avx512.kxor.w") {
2164 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2165 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2166 Rep = Builder.CreateXor(LHS, RHS);
2167 Rep = Builder.CreateBitCast(Rep, CI->getType());
2168 } else if (IsX86 && Name == "avx512.kxnor.w") {
2169 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2170 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2171 LHS = Builder.CreateNot(LHS);
2172 Rep = Builder.CreateXor(LHS, RHS);
2173 Rep = Builder.CreateBitCast(Rep, CI->getType());
2174 } else if (IsX86 && Name == "avx512.knot.w") {
2175 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2176 Rep = Builder.CreateNot(Rep);
2177 Rep = Builder.CreateBitCast(Rep, CI->getType());
2178 } else if (IsX86 &&
2179 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2180 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2181 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2182 Rep = Builder.CreateOr(LHS, RHS);
2183 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2184 Value *C;
2185 if (Name[14] == 'c')
2186 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2187 else
2188 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2189 Rep = Builder.CreateICmpEQ(Rep, C);
2190 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2191 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2192 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2193 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2194 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2195 Type *I32Ty = Type::getInt32Ty(C);
2196 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2197 ConstantInt::get(I32Ty, 0));
2198 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2199 ConstantInt::get(I32Ty, 0));
2200 Value *EltOp;
2201 if (Name.contains(".add."))
2202 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2203 else if (Name.contains(".sub."))
2204 EltOp = Builder.CreateFSub(Elt0, Elt1);
2205 else if (Name.contains(".mul."))
2206 EltOp = Builder.CreateFMul(Elt0, Elt1);
2207 else
2208 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2209 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2210 ConstantInt::get(I32Ty, 0));
2211 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
2212 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2213 bool CmpEq = Name[16] == 'e';
2214 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2215 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
2216 Type *OpTy = CI->getArgOperand(0)->getType();
2217 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2218 Intrinsic::ID IID;
2219 switch (VecWidth) {
2220 default: llvm_unreachable("Unexpected intrinsic");
2221 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2222 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2223 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2224 }
2225
2226 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2227 { CI->getOperand(0), CI->getArgOperand(1) });
2228 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2229 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
2230 Type *OpTy = CI->getArgOperand(0)->getType();
2231 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2232 unsigned EltWidth = OpTy->getScalarSizeInBits();
2233 Intrinsic::ID IID;
2234 if (VecWidth == 128 && EltWidth == 32)
2235 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2236 else if (VecWidth == 256 && EltWidth == 32)
2237 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2238 else if (VecWidth == 512 && EltWidth == 32)
2239 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2240 else if (VecWidth == 128 && EltWidth == 64)
2241 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2242 else if (VecWidth == 256 && EltWidth == 64)
2243 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2244 else if (VecWidth == 512 && EltWidth == 64)
2245 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2246 else
2247 llvm_unreachable("Unexpected intrinsic");
2248
2249 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2250 { CI->getOperand(0), CI->getArgOperand(1) });
2251 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2252 } else if (IsX86 && Name.startswith("avx512.cmp.p")) {
2253 SmallVector<Value *, 4> Args(CI->args());
2254 Type *OpTy = Args[0]->getType();
2255 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2256 unsigned EltWidth = OpTy->getScalarSizeInBits();
2257 Intrinsic::ID IID;
2258 if (VecWidth == 128 && EltWidth == 32)
2259 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2260 else if (VecWidth == 256 && EltWidth == 32)
2261 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2262 else if (VecWidth == 512 && EltWidth == 32)
2263 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2264 else if (VecWidth == 128 && EltWidth == 64)
2265 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2266 else if (VecWidth == 256 && EltWidth == 64)
2267 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2268 else if (VecWidth == 512 && EltWidth == 64)
2269 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2270 else
2271 llvm_unreachable("Unexpected intrinsic");
2272
2273 Value *Mask = Constant::getAllOnesValue(CI->getType());
2274 if (VecWidth == 512)
2275 std::swap(Mask, Args.back());
2276 Args.push_back(Mask);
2277
2278 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2279 Args);
2280 } else if (IsX86 && Name.startswith("avx512.mask.cmp.")) {
2281 // Integer compare intrinsics.
2282 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2283 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2284 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
2285 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2286 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2287 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
2288 Name.startswith("avx512.cvtw2mask.") ||
2289 Name.startswith("avx512.cvtd2mask.") ||
2290 Name.startswith("avx512.cvtq2mask."))) {
2291 Value *Op = CI->getArgOperand(0);
2292 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2293 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2294 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2295 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2296 Name == "ssse3.pabs.w.128" ||
2297 Name == "ssse3.pabs.d.128" ||
2298 Name.startswith("avx2.pabs") ||
2299 Name.startswith("avx512.mask.pabs"))) {
2300 Rep = upgradeAbs(Builder, *CI);
2301 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2302 Name == "sse2.pmaxs.w" ||
2303 Name == "sse41.pmaxsd" ||
2304 Name.startswith("avx2.pmaxs") ||
2305 Name.startswith("avx512.mask.pmaxs"))) {
2306 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2307 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2308 Name == "sse41.pmaxuw" ||
2309 Name == "sse41.pmaxud" ||
2310 Name.startswith("avx2.pmaxu") ||
2311 Name.startswith("avx512.mask.pmaxu"))) {
2312 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2313 } else if (IsX86 && (Name == "sse41.pminsb" ||
2314 Name == "sse2.pmins.w" ||
2315 Name == "sse41.pminsd" ||
2316 Name.startswith("avx2.pmins") ||
2317 Name.startswith("avx512.mask.pmins"))) {
2318 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2319 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2320 Name == "sse41.pminuw" ||
2321 Name == "sse41.pminud" ||
2322 Name.startswith("avx2.pminu") ||
2323 Name.startswith("avx512.mask.pminu"))) {
2324 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2325 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2326 Name == "avx2.pmulu.dq" ||
2327 Name == "avx512.pmulu.dq.512" ||
2328 Name.startswith("avx512.mask.pmulu.dq."))) {
2329 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2330 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2331 Name == "avx2.pmul.dq" ||
2332 Name == "avx512.pmul.dq.512" ||
2333 Name.startswith("avx512.mask.pmul.dq."))) {
2334 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2335 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2336 Name == "sse2.cvtsi2sd" ||
2337 Name == "sse.cvtsi642ss" ||
2338 Name == "sse2.cvtsi642sd")) {
2339 Rep = Builder.CreateSIToFP(
2340 CI->getArgOperand(1),
2341 cast<VectorType>(CI->getType())->getElementType());
2342 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2343 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2344 Rep = Builder.CreateUIToFP(
2345 CI->getArgOperand(1),
2346 cast<VectorType>(CI->getType())->getElementType());
2347 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2348 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2349 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2350 Rep = Builder.CreateFPExt(
2351 Rep, cast<VectorType>(CI->getType())->getElementType());
2352 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2353 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2354 Name == "sse2.cvtdq2ps" ||
2355 Name == "avx.cvtdq2.pd.256" ||
2356 Name == "avx.cvtdq2.ps.256" ||
2357 Name.startswith("avx512.mask.cvtdq2pd.") ||
2358 Name.startswith("avx512.mask.cvtudq2pd.") ||
2359 Name.startswith("avx512.mask.cvtdq2ps.") ||
2360 Name.startswith("avx512.mask.cvtudq2ps.") ||
2361 Name.startswith("avx512.mask.cvtqq2pd.") ||
2362 Name.startswith("avx512.mask.cvtuqq2pd.") ||
2363 Name == "avx512.mask.cvtqq2ps.256" ||
2364 Name == "avx512.mask.cvtqq2ps.512" ||
2365 Name == "avx512.mask.cvtuqq2ps.256" ||
2366 Name == "avx512.mask.cvtuqq2ps.512" ||
2367 Name == "sse2.cvtps2pd" ||
2368 Name == "avx.cvt.ps2.pd.256" ||
2369 Name == "avx512.mask.cvtps2pd.128" ||
2370 Name == "avx512.mask.cvtps2pd.256")) {
2371 auto *DstTy = cast<FixedVectorType>(CI->getType());
2372 Rep = CI->getArgOperand(0);
2373 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2374
2375 unsigned NumDstElts = DstTy->getNumElements();
2376 if (NumDstElts < SrcTy->getNumElements()) {
2377 assert(NumDstElts == 2 && "Unexpected vector size");
2378 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2379 }
2380
2381 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2382 bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
2383 if (IsPS2PD)
2384 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2385 else if (CI->arg_size() == 4 &&
2386 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2387 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2388 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2389 : Intrinsic::x86_avx512_sitofp_round;
2390 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2391 { DstTy, SrcTy });
2392 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2393 } else {
2394 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2395 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2396 }
2397
2398 if (CI->arg_size() >= 3)
2399 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2400 CI->getArgOperand(1));
2401 } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
2402 Name.startswith("vcvtph2ps."))) {
2403 auto *DstTy = cast<FixedVectorType>(CI->getType());
2404 Rep = CI->getArgOperand(0);
2405 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2406 unsigned NumDstElts = DstTy->getNumElements();
2407 if (NumDstElts != SrcTy->getNumElements()) {
2408 assert(NumDstElts == 4 && "Unexpected vector size");
2409 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2410 }
2411 Rep = Builder.CreateBitCast(
2412 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2413 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2414 if (CI->arg_size() >= 3)
2415 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2416 CI->getArgOperand(1));
2417 } else if (IsX86 && Name.startswith("avx512.mask.load")) {
2418 // "avx512.mask.loadu." or "avx512.mask.load."
2419 bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
2420 Rep =
2421 UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2422 CI->getArgOperand(2), Aligned);
2423 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
2424 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2425 Type *PtrTy = ResultTy->getElementType();
2426
2427 // Cast the pointer to element type.
2428 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2429 llvm::PointerType::getUnqual(PtrTy));
2430
2431 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2432 ResultTy->getNumElements());
2433
2434 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2435 Intrinsic::masked_expandload,
2436 ResultTy);
2437 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2438 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
2439 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2440 Type *PtrTy = ResultTy->getElementType();
2441
2442 // Cast the pointer to element type.
2443 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2444 llvm::PointerType::getUnqual(PtrTy));
2445
2446 Value *MaskVec =
2447 getX86MaskVec(Builder, CI->getArgOperand(2),
2448 cast<FixedVectorType>(ResultTy)->getNumElements());
2449
2450 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2451 Intrinsic::masked_compressstore,
2452 ResultTy);
2453 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2454 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
2455 Name.startswith("avx512.mask.expand."))) {
2456 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2457
2458 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2459 ResultTy->getNumElements());
2460
2461 bool IsCompress = Name[12] == 'c';
2462 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2463 : Intrinsic::x86_avx512_mask_expand;
2464 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2465 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2466 MaskVec });
2467 } else if (IsX86 && Name.startswith("xop.vpcom")) {
2468 bool IsSigned;
2469 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") ||
2470 Name.endswith("uq"))
2471 IsSigned = false;
2472 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") ||
2473 Name.endswith("q"))
2474 IsSigned = true;
2475 else
2476 llvm_unreachable("Unknown suffix");
2477
2478 unsigned Imm;
2479 if (CI->arg_size() == 3) {
2480 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2481 } else {
2482 Name = Name.substr(9); // strip off "xop.vpcom"
2483 if (Name.startswith("lt"))
2484 Imm = 0;
2485 else if (Name.startswith("le"))
2486 Imm = 1;
2487 else if (Name.startswith("gt"))
2488 Imm = 2;
2489 else if (Name.startswith("ge"))
2490 Imm = 3;
2491 else if (Name.startswith("eq"))
2492 Imm = 4;
2493 else if (Name.startswith("ne"))
2494 Imm = 5;
2495 else if (Name.startswith("false"))
2496 Imm = 6;
2497 else if (Name.startswith("true"))
2498 Imm = 7;
2499 else
2500 llvm_unreachable("Unknown condition");
2501 }
2502
2503 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2504 } else if (IsX86 && Name.startswith("xop.vpcmov")) {
2505 Value *Sel = CI->getArgOperand(2);
2506 Value *NotSel = Builder.CreateNot(Sel);
2507 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
2508 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
2509 Rep = Builder.CreateOr(Sel0, Sel1);
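// vpcmov is a bitwise select: each result bit is (Op0 & Sel) | (Op1 & ~Sel).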
2510 } else if (IsX86 && (Name.startswith("xop.vprot") ||
2511 Name.startswith("avx512.prol") ||
2512 Name.startswith("avx512.mask.prol"))) {
2513 Rep = upgradeX86Rotate(Builder, *CI, false);
2514 } else if (IsX86 && (Name.startswith("avx512.pror") ||
2515 Name.startswith("avx512.mask.pror"))) {
2516 Rep = upgradeX86Rotate(Builder, *CI, true);
2517 } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
2518 Name.startswith("avx512.mask.vpshld") ||
2519 Name.startswith("avx512.maskz.vpshld"))) {
2520 bool ZeroMask = Name[11] == 'z';
2521 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
2522 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
2523 Name.startswith("avx512.mask.vpshrd") ||
2524 Name.startswith("avx512.maskz.vpshrd"))) {
2525 bool ZeroMask = Name[11] == 'z';
2526 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
2527 } else if (IsX86 && Name == "sse42.crc32.64.8") {
2528 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
2529 Intrinsic::x86_sse42_crc32_32_8);
2530 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
2531 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
2532 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
2533 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
2534 Name.startswith("avx512.vbroadcast.s"))) {
2535 // Replace broadcasts with a series of insertelements.
2536 auto *VecTy = cast<FixedVectorType>(CI->getType());
2537 Type *EltTy = VecTy->getElementType();
2538 unsigned EltNum = VecTy->getNumElements();
2539 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
2540 EltTy->getPointerTo());
2541 Value *Load = Builder.CreateLoad(EltTy, Cast);
2542 Type *I32Ty = Type::getInt32Ty(C);
2543 Rep = PoisonValue::get(VecTy);
2544 for (unsigned I = 0; I < EltNum; ++I)
2545 Rep = Builder.CreateInsertElement(Rep, Load,
2546 ConstantInt::get(I32Ty, I));
2547 } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
2548 Name.startswith("sse41.pmovzx") ||
2549 Name.startswith("avx2.pmovsx") ||
2550 Name.startswith("avx2.pmovzx") ||
2551 Name.startswith("avx512.mask.pmovsx") ||
2552 Name.startswith("avx512.mask.pmovzx"))) {
2553 auto *DstTy = cast<FixedVectorType>(CI->getType());
2554 unsigned NumDstElts = DstTy->getNumElements();
2555
2556 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
2557 SmallVector<int, 8> ShuffleMask(NumDstElts);
2558 for (unsigned i = 0; i != NumDstElts; ++i)
2559 ShuffleMask[i] = i;
2560
2561 Value *SV =
2562 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
2563
2564 bool DoSext = (StringRef::npos != Name.find("pmovsx"));
2565 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
2566 : Builder.CreateZExt(SV, DstTy);
2567 // If there are 3 arguments, it's a masked intrinsic so we need a select.
2568 if (CI->arg_size() == 3)
2569 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2570 CI->getArgOperand(1));
2571 } else if (Name == "avx512.mask.pmov.qd.256" ||
2572 Name == "avx512.mask.pmov.qd.512" ||
2573 Name == "avx512.mask.pmov.wb.256" ||
2574 Name == "avx512.mask.pmov.wb.512") {
2575 Type *Ty = CI->getArgOperand(1)->getType();
2576 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
2577 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2578 CI->getArgOperand(1));
2579 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
2580 Name == "avx2.vbroadcasti128")) {
2581 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
2582 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
2583 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
2584 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
2585 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
2586 PointerType::getUnqual(VT));
2587 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
2588 if (NumSrcElts == 2)
2589 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
2590 else
2591 Rep = Builder.CreateShuffleVector(
2592 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
2593 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
2594 Name.startswith("avx512.mask.shuf.f"))) {
2595 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2596 Type *VT = CI->getType();
2597 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
2598 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
2599 unsigned ControlBitsMask = NumLanes - 1;
2600 unsigned NumControlBits = NumLanes / 2;
2601 SmallVector<int, 8> ShuffleMask(0);
2602
2603 for (unsigned l = 0; l != NumLanes; ++l) {
2604 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
2605 // We actually need the other source.
2606 if (l >= NumLanes / 2)
2607 LaneMask += NumLanes;
2608 for (unsigned i = 0; i != NumElementsInLane; ++i)
2609 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
2610 }
2611 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2612 CI->getArgOperand(1), ShuffleMask);
2613 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2614 CI->getArgOperand(3));
2615 } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
2616 Name.startswith("avx512.mask.broadcasti"))) {
2617 unsigned NumSrcElts =
2618 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
2619 ->getNumElements();
2620 unsigned NumDstElts =
2621 cast<FixedVectorType>(CI->getType())->getNumElements();
2622
2623 SmallVector<int, 8> ShuffleMask(NumDstElts);
2624 for (unsigned i = 0; i != NumDstElts; ++i)
2625 ShuffleMask[i] = i % NumSrcElts;
2626
2627 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2628 CI->getArgOperand(0),
2629 ShuffleMask);
2630 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2631 CI->getArgOperand(1));
2632 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
2633 Name.startswith("avx2.vbroadcast") ||
2634 Name.startswith("avx512.pbroadcast") ||
2635 Name.startswith("avx512.mask.broadcast.s"))) {
2636 // Replace vp?broadcasts with a vector shuffle.
2637 Value *Op = CI->getArgOperand(0);
2638 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
2639 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
2640 SmallVector<int, 8> M;
2641 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
2642 Rep = Builder.CreateShuffleVector(Op, M);
2643
2644 if (CI->arg_size() == 3)
2645 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2646 CI->getArgOperand(1));
2647 } else if (IsX86 && (Name.startswith("sse2.padds.") ||
2648 Name.startswith("avx2.padds.") ||
2649 Name.startswith("avx512.padds.") ||
2650 Name.startswith("avx512.mask.padds."))) {
2651 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
2652 } else if (IsX86 && (Name.startswith("sse2.psubs.") ||
2653 Name.startswith("avx2.psubs.") ||
2654 Name.startswith("avx512.psubs.") ||
2655 Name.startswith("avx512.mask.psubs."))) {
2656 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
2657 } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
2658 Name.startswith("avx2.paddus.") ||
2659 Name.startswith("avx512.mask.paddus."))) {
2660 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
2661 } else if (IsX86 && (Name.startswith("sse2.psubus.") ||
2662 Name.startswith("avx2.psubus.") ||
2663 Name.startswith("avx512.mask.psubus."))) {
2664 Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
2665 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
2666 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2667 CI->getArgOperand(1),
2668 CI->getArgOperand(2),
2669 CI->getArgOperand(3),
2670 CI->getArgOperand(4),
2671 false);
2672 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
2673 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
2674 CI->getArgOperand(1),
2675 CI->getArgOperand(2),
2676 CI->getArgOperand(3),
2677 CI->getArgOperand(4),
2678 true);
2679 } else if (IsX86 && (Name == "sse2.psll.dq" ||
2680 Name == "avx2.psll.dq")) {
2681 // 128/256-bit shift left specified in bits.
2682 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2683 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
2684 Shift / 8); // Shift is in bits.
2685 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
2686 Name == "avx2.psrl.dq")) {
2687 // 128/256-bit shift right specified in bits.
2688 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2689 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
2690 Shift / 8); // Shift is in bits.
2691 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
2692 Name == "avx2.psll.dq.bs" ||
2693 Name == "avx512.psll.dq.512")) {
2694 // 128/256/512-bit shift left specified in bytes.
2695 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2696 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2697 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
2698 Name == "avx2.psrl.dq.bs" ||
2699 Name == "avx512.psrl.dq.512")) {
2700 // 128/256/512-bit shift right specified in bytes.
2701 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2702 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2703 } else if (IsX86 && (Name == "sse41.pblendw" ||
2704 Name.startswith("sse41.blendp") ||
2705 Name.startswith("avx.blend.p") ||
2706 Name == "avx2.pblendw" ||
2707 Name.startswith("avx2.pblendd."))) {
2708 Value *Op0 = CI->getArgOperand(0);
2709 Value *Op1 = CI->getArgOperand(1);
2710 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2711 auto *VecTy = cast<FixedVectorType>(CI->getType());
2712 unsigned NumElts = VecTy->getNumElements();
2713
2714 SmallVector<int, 16> Idxs(NumElts);
2715 for (unsigned i = 0; i != NumElts; ++i)
2716 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
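// e.g. a 4-element blend with Imm = 0b0101 gives Idxs = {4, 1, 6, 3},
// taking lanes 0 and 2 from Op1.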
2717
2718 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2719 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2720 Name == "avx2.vinserti128" ||
2721 Name.startswith("avx512.mask.insert"))) {
2722 Value *Op0 = CI->getArgOperand(0);
2723 Value *Op1 = CI->getArgOperand(1);
2724 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2725 unsigned DstNumElts =
2726 cast<FixedVectorType>(CI->getType())->getNumElements();
2727 unsigned SrcNumElts =
2728 cast<FixedVectorType>(Op1->getType())->getNumElements();
2729 unsigned Scale = DstNumElts / SrcNumElts;
2730
2731 // Mask off the high bits of the immediate value; hardware ignores those.
2732 Imm = Imm % Scale;
2733
2734 // Extend the second operand into a vector the size of the destination.
2735 SmallVector<int, 8> Idxs(DstNumElts);
2736 for (unsigned i = 0; i != SrcNumElts; ++i)
2737 Idxs[i] = i;
2738 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2739 Idxs[i] = SrcNumElts;
2740 Rep = Builder.CreateShuffleVector(Op1, Idxs);
2741
2742 // Insert the second operand into the first operand.
2743
2744 // Note that there is no guarantee that instruction lowering will actually
2745 // produce a vinsertf128 instruction for the created shuffles. In
2746 // particular, the 0 immediate case involves no lane changes, so it can
2747 // be handled as a blend.
2748
2749 // Example of shuffle mask for 32-bit elements:
2750 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2751 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
2752
2753 // First fill with identity mask.
2754 for (unsigned i = 0; i != DstNumElts; ++i)
2755 Idxs[i] = i;
2756 // Then replace the elements where we need to insert.
2757 for (unsigned i = 0; i != SrcNumElts; ++i)
2758 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2759 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2760
2761 // If the intrinsic has a mask operand, handle that.
2762 if (CI->arg_size() == 5)
2763 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2764 CI->getArgOperand(3));
2765 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2766 Name == "avx2.vextracti128" ||
2767 Name.startswith("avx512.mask.vextract"))) {
2768 Value *Op0 = CI->getArgOperand(0);
2769 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2770 unsigned DstNumElts =
2771 cast<FixedVectorType>(CI->getType())->getNumElements();
2772 unsigned SrcNumElts =
2773 cast<FixedVectorType>(Op0->getType())->getNumElements();
2774 unsigned Scale = SrcNumElts / DstNumElts;
2775
2776 // Mask off the high bits of the immediate value; hardware ignores those.
2777 Imm = Imm % Scale;
2778
2779 // Get indexes for the subvector of the input vector.
2780 SmallVector<int, 8> Idxs(DstNumElts);
2781 for (unsigned i = 0; i != DstNumElts; ++i) {
2782 Idxs[i] = i + (Imm * DstNumElts);
2783 }
2784 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2785
2786 // If the intrinsic has a mask operand, handle that.
2787 if (CI->arg_size() == 4)
2788 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2789 CI->getArgOperand(2));
2790 } else if (!IsX86 && Name == "stackprotectorcheck") {
2791 Rep = nullptr;
2792 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
2793 Name.startswith("avx512.mask.perm.di."))) {
2794 Value *Op0 = CI->getArgOperand(0);
2795 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2796 auto *VecTy = cast<FixedVectorType>(CI->getType());
2797 unsigned NumElts = VecTy->getNumElements();
2798
2799 SmallVector<int, 8> Idxs(NumElts);
2800 for (unsigned i = 0; i != NumElts; ++i)
2801 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
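// e.g. Imm = 0x4E (0b01001110) selects {2, 3, 0, 1} within each
// 4-element group.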
2802
2803 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2804
2805 if (CI->arg_size() == 4)
2806 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2807 CI->getArgOperand(2));
2808 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
2809 Name == "avx2.vperm2i128")) {
2810 // The immediate permute control byte looks like this:
2811 // [1:0] - select 128 bits from sources for low half of destination
2812 // [2] - ignore
2813 // [3] - zero low half of destination
2814 // [5:4] - select 128 bits from sources for high half of destination
2815 // [6] - ignore
2816 // [7] - zero high half of destination
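// e.g. Imm = 0x31 takes the high 128 bits of the first source for the
// low half and the high 128 bits of the second source for the high half.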
2817
2818 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2819
2820 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2821 unsigned HalfSize = NumElts / 2;
2822 SmallVector<int, 8> ShuffleMask(NumElts);
2823
2824 // Determine which operand(s) are actually in use for this instruction.
2825 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2826 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
2827
2828 // If needed, replace operands based on zero mask.
2829 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
2830 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
2831
2832 // Permute low half of result.
2833 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
2834 for (unsigned i = 0; i < HalfSize; ++i)
2835 ShuffleMask[i] = StartIndex + i;
2836
2837 // Permute high half of result.
2838 StartIndex = (Imm & 0x10) ? HalfSize : 0;
2839 for (unsigned i = 0; i < HalfSize; ++i)
2840 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
2841
2842 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2843
2844 } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
2845 Name == "sse2.pshuf.d" ||
2846 Name.startswith("avx512.mask.vpermil.p") ||
2847 Name.startswith("avx512.mask.pshuf.d."))) {
2848 Value *Op0 = CI->getArgOperand(0);
2849 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2850 auto *VecTy = cast<FixedVectorType>(CI->getType());
2851 unsigned NumElts = VecTy->getNumElements();
2852 // Calculate the size of each index in the immediate.
2853 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
2854 unsigned IdxMask = ((1 << IdxSize) - 1);
2855
2856 SmallVector<int, 8> Idxs(NumElts);
2857 // Look up the bits for this element, wrapping around the immediate every
2858 // 8 bits. Elements are grouped into sets of 2 or 4 elements, so we need
2859 // to offset by the first index of each group.
2860 for (unsigned i = 0; i != NumElts; ++i)
2861 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
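// e.g. for sse2.pshuf.d (v4i32, IdxSize == 2, IdxMask == 3) this reduces
// to the familiar Idxs[i] = (Imm >> (2 * i)) & 3.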
2862
2863 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2864
2865 if (CI->arg_size() == 4)
2866 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2867 CI->getArgOperand(2));
2868 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
2869 Name.startswith("avx512.mask.pshufl.w."))) {
2870 Value *Op0 = CI->getArgOperand(0);
2871 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2872 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2873
2874 SmallVector<int, 16> Idxs(NumElts);
2875 for (unsigned l = 0; l != NumElts; l += 8) {
2876 for (unsigned i = 0; i != 4; ++i)
2877 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
2878 for (unsigned i = 4; i != 8; ++i)
2879 Idxs[i + l] = i + l;
2880 }
2881
2882 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2883
2884 if (CI->arg_size() == 4)
2885 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2886 CI->getArgOperand(2));
2887 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
2888 Name.startswith("avx512.mask.pshufh.w."))) {
2889 Value *Op0 = CI->getArgOperand(0);
2890 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2891 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2892
2893 SmallVector<int, 16> Idxs(NumElts);
2894 for (unsigned l = 0; l != NumElts; l += 8) {
2895 for (unsigned i = 0; i != 4; ++i)
2896 Idxs[i + l] = i + l;
2897 for (unsigned i = 0; i != 4; ++i)
2898 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
2899 }
2900
2901 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2902
2903 if (CI->arg_size() == 4)
2904 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2905 CI->getArgOperand(2));
2906 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
2907 Value *Op0 = CI->getArgOperand(0);
2908 Value *Op1 = CI->getArgOperand(1);
2909 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2910 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2911
2912 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2913 unsigned HalfLaneElts = NumLaneElts / 2;
2914
2915 SmallVector<int, 16> Idxs(NumElts);
2916 for (unsigned i = 0; i != NumElts; ++i) {
2917 // Base index is the starting element of the lane.
2918 Idxs[i] = i - (i % NumLaneElts);
2919 // If we are halfway through the lane, switch to the other source.
2920 if ((i % NumLaneElts) >= HalfLaneElts)
2921 Idxs[i] += NumElts;
2922 // Now select the specific element by adding HalfLaneElts bits from
2923 // the immediate, wrapping around the immediate every 8 bits.
2924 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
2925 }
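// e.g. for shufps (v4f32, HalfLaneElts == 2) this yields
// {Imm[1:0], Imm[3:2], 4 + Imm[5:4], 4 + Imm[7:6]}.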
2926
2927 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2928
2929 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2930 CI->getArgOperand(3));
2931 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
2932 Name.startswith("avx512.mask.movshdup") ||
2933 Name.startswith("avx512.mask.movsldup"))) {
2934 Value *Op0 = CI->getArgOperand(0);
2935 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2936 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2937
2938 unsigned Offset = 0;
2939 if (Name.startswith("avx512.mask.movshdup."))
2940 Offset = 1;
2941
2942 SmallVector<int, 16> Idxs(NumElts);
2943 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
2944 for (unsigned i = 0; i != NumLaneElts; i += 2) {
2945 Idxs[i + l + 0] = i + l + Offset;
2946 Idxs[i + l + 1] = i + l + Offset;
2947 }
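// e.g. movsldup on v4f32 gives {0, 0, 2, 2}, movshdup gives {1, 1, 3, 3},
// and movddup on v2f64 gives {0, 0}.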
2948
2949 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2950
2951 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2952 CI->getArgOperand(1));
2953 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
2954 Name.startswith("avx512.mask.unpckl."))) {
2955 Value *Op0 = CI->getArgOperand(0);
2956 Value *Op1 = CI->getArgOperand(1);
2957 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2958 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2959
2960 SmallVector<int, 64> Idxs(NumElts);
2961 for (int l = 0; l != NumElts; l += NumLaneElts)
2962 for (int i = 0; i != NumLaneElts; ++i)
2963 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
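// e.g. unpcklps on v4f32 interleaves the low halves of each lane: {0, 4, 1, 5}.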
2964
2965 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2966
2967 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2968 CI->getArgOperand(2));
2969 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
2970 Name.startswith("avx512.mask.unpckh."))) {
2971 Value *Op0 = CI->getArgOperand(0);
2972 Value *Op1 = CI->getArgOperand(1);
2973 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
2974 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2975
2976 SmallVector<int, 64> Idxs(NumElts);
2977 for (int l = 0; l != NumElts; l += NumLaneElts)
2978 for (int i = 0; i != NumLaneElts; ++i)
2979 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
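// e.g. unpckhps on v4f32 interleaves the high halves of each lane: {2, 6, 3, 7}.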
2980
2981 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2982
2983 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2984 CI->getArgOperand(2));
2985 } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
2986 Name.startswith("avx512.mask.pand."))) {
2987 VectorType *FTy = cast<VectorType>(CI->getType());
2988 VectorType *ITy = VectorType::getInteger(FTy);
2989 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
2990 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
2991 Rep = Builder.CreateBitCast(Rep, FTy);
2992 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2993 CI->getArgOperand(2));
2994 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
2995 Name.startswith("avx512.mask.pandn."))) {
2996 VectorType *FTy = cast<VectorType>(CI->getType());
2997 VectorType *ITy = VectorType::getInteger(FTy);
2998 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
2999 Rep = Builder.CreateAnd(Rep,
3000 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3001 Rep = Builder.CreateBitCast(Rep, FTy);
3002 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3003 CI->getArgOperand(2));
3004 } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
3005 Name.startswith("avx512.mask.por."))) {
3006 VectorType *FTy = cast<VectorType>(CI->getType());
3007 VectorType *ITy = VectorType::getInteger(FTy);
3008 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3009 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3010 Rep = Builder.CreateBitCast(Rep, FTy);
3011 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3012 CI->getArgOperand(2));
3013 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
3014 Name.startswith("avx512.mask.pxor."))) {
3015 VectorType *FTy = cast<VectorType>(CI->getType());
3016 VectorType *ITy = VectorType::getInteger(FTy);
3017 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3018 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3019 Rep = Builder.CreateBitCast(Rep, FTy);
3020 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3021 CI->getArgOperand(2));
3022 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
3023 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3024 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3025 CI->getArgOperand(2));
3026 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
3027 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3028 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3029 CI->getArgOperand(2));
3030 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
3031 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3032 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3033 CI->getArgOperand(2));
3034 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
3035 if (Name.endswith(".512")) {
3036 Intrinsic::ID IID;
3037 if (Name[17] == 's')
3038 IID = Intrinsic::x86_avx512_add_ps_512;
3039 else
3040 IID = Intrinsic::x86_avx512_add_pd_512;
3041
3042 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3043 { CI->getArgOperand(0), CI->getArgOperand(1),
3044 CI->getArgOperand(4) });
3045 } else {
3046 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3047 }
3048 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3049 CI->getArgOperand(2));
3050 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
3051 if (Name.endswith(".512")) {
3052 Intrinsic::ID IID;
3053 if (Name[17] == 's')
3054 IID = Intrinsic::x86_avx512_div_ps_512;
3055 else
3056 IID = Intrinsic::x86_avx512_div_pd_512;
3057
3058 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3059 { CI->getArgOperand(0), CI->getArgOperand(1),
3060 CI->getArgOperand(4) });
3061 } else {
3062 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3063 }
3064 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3065 CI->getArgOperand(2));
3066 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
3067 if (Name.endswith(".512")) {
3068 Intrinsic::ID IID;
3069 if (Name[17] == 's')
3070 IID = Intrinsic::x86_avx512_mul_ps_512;
3071 else
3072 IID = Intrinsic::x86_avx512_mul_pd_512;
3073
3074 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3075 { CI->getArgOperand(0), CI->getArgOperand(1),
3076 CI->getArgOperand(4) });
3077 } else {
3078 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3079 }
3080 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3081 CI->getArgOperand(2));
3082 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
3083 if (Name.endswith(".512")) {
3084 Intrinsic::ID IID;
3085 if (Name[17] == 's')
3086 IID = Intrinsic::x86_avx512_sub_ps_512;
3087 else
3088 IID = Intrinsic::x86_avx512_sub_pd_512;
3089
3090 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3091 { CI->getArgOperand(0), CI->getArgOperand(1),
3092 CI->getArgOperand(4) });
3093 } else {
3094 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3095 }
3096 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3097 CI->getArgOperand(2));
3098 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
3099 Name.startswith("avx512.mask.min.p")) &&
3100 Name.drop_front(18) == ".512") {
3101 bool IsDouble = Name[17] == 'd';
3102 bool IsMin = Name[13] == 'i';
3103 static const Intrinsic::ID MinMaxTbl[2][2] = {
3104 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3105 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3106 };
3107 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3108
3109 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3110 { CI->getArgOperand(0), CI->getArgOperand(1),
3111 CI->getArgOperand(4) });
3112 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
3113 CI->getArgOperand(2));
3114 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
3115 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3116 Intrinsic::ctlz,
3117 CI->getType()),
3118 { CI->getArgOperand(0), Builder.getInt1(false) });
3119 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
3120 CI->getArgOperand(1));
3121 } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
3122 bool IsImmediate = Name[16] == 'i' ||
3123 (Name.size() > 18 && Name[18] == 'i');
3124 bool IsVariable = Name[16] == 'v';
3125 char Size = Name[16] == '.' ? Name[17] :
3126 Name[17] == '.' ? Name[18] :
3127 Name[18] == '.' ? Name[19] :
3128 Name[20];
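// Name[16] is the first character past "avx512.mask.psll"; e.g.
// "avx512.mask.psll.d.128" has Size at Name[17], "avx512.mask.pslli.d"
// at Name[18], and "avx512.mask.psllv16.hi" at Name[20].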
3129
3130 Intrinsic::ID IID;
3131 if (IsVariable && Name[17] != '.') {
3132 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3133 IID = Intrinsic::x86_avx2_psllv_q;
3134 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3135 IID = Intrinsic::x86_avx2_psllv_q_256;
3136 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3137 IID = Intrinsic::x86_avx2_psllv_d;
3138 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3139 IID = Intrinsic::x86_avx2_psllv_d_256;
3140 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3141 IID = Intrinsic::x86_avx512_psllv_w_128;
3142 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3143 IID = Intrinsic::x86_avx512_psllv_w_256;
3144 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3145 IID = Intrinsic::x86_avx512_psllv_w_512;
3146 else
3147 llvm_unreachable("Unexpected size");
3148 } else if (Name.endswith(".128")) {
3149 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3150 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3151 : Intrinsic::x86_sse2_psll_d;
3152 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3153 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3154 : Intrinsic::x86_sse2_psll_q;
3155 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3156 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3157 : Intrinsic::x86_sse2_psll_w;
3158 else
3159 llvm_unreachable("Unexpected size");
3160 } else if (Name.endswith(".256")) {
3161 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3162 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3163 : Intrinsic::x86_avx2_psll_d;
3164 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3165 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3166 : Intrinsic::x86_avx2_psll_q;
3167 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3168 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3169 : Intrinsic::x86_avx2_psll_w;
3170 else
3171 llvm_unreachable("Unexpected size");
3172 } else {
3173 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3174 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3175 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3176 Intrinsic::x86_avx512_psll_d_512;
3177 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3178 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3179 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3180 Intrinsic::x86_avx512_psll_q_512;
3181 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3182 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3183 : Intrinsic::x86_avx512_psll_w_512;
3184 else
3185 llvm_unreachable("Unexpected size");
3186 }
3187
3188 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3189 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
3190 bool IsImmediate = Name[16] == 'i' ||
3191 (Name.size() > 18 && Name[18] == 'i');
3192 bool IsVariable = Name[16] == 'v';
3193 char Size = Name[16] == '.' ? Name[17] :
3194 Name[17] == '.' ? Name[18] :
3195 Name[18] == '.' ? Name[19] :
3196 Name[20];
3197
3198 Intrinsic::ID IID;
3199 if (IsVariable && Name[17] != '.') {
3200 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3201 IID = Intrinsic::x86_avx2_psrlv_q;
3202 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3203 IID = Intrinsic::x86_avx2_psrlv_q_256;
3204 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3205 IID = Intrinsic::x86_avx2_psrlv_d;
3206 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3207 IID = Intrinsic::x86_avx2_psrlv_d_256;
3208 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3209 IID = Intrinsic::x86_avx512_psrlv_w_128;
3210 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3211 IID = Intrinsic::x86_avx512_psrlv_w_256;
3212 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3213 IID = Intrinsic::x86_avx512_psrlv_w_512;
3214 else
3215 llvm_unreachable("Unexpected size");
3216 } else if (Name.endswith(".128")) {
3217 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3218 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3219 : Intrinsic::x86_sse2_psrl_d;
3220 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3221 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3222 : Intrinsic::x86_sse2_psrl_q;
3223 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3224 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3225 : Intrinsic::x86_sse2_psrl_w;
3226 else
3227 llvm_unreachable("Unexpected size");
3228 } else if (Name.endswith(".256")) {
3229 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3230 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3231 : Intrinsic::x86_avx2_psrl_d;
3232 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3233 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3234 : Intrinsic::x86_avx2_psrl_q;
3235 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3236 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3237 : Intrinsic::x86_avx2_psrl_w;
3238 else
3239 llvm_unreachable("Unexpected size");
3240 } else {
3241 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
3242 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3243 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3244 Intrinsic::x86_avx512_psrl_d_512;
3245 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
3246 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3247 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3248 Intrinsic::x86_avx512_psrl_q_512;
3249 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
3250 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3251 : Intrinsic::x86_avx512_psrl_w_512;
3252 else
3253 llvm_unreachable("Unexpected size");
3254 }
3255
3256 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3257 } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
3258 bool IsImmediate = Name[16] == 'i' ||
3259 (Name.size() > 18 && Name[18] == 'i');
3260 bool IsVariable = Name[16] == 'v';
3261 char Size = Name[16] == '.' ? Name[17] :
3262 Name[17] == '.' ? Name[18] :
3263 Name[18] == '.' ? Name[19] :
3264 Name[20];
3265
3266 Intrinsic::ID IID;
3267 if (IsVariable && Name[17] != '.') {
3268 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3269 IID = Intrinsic::x86_avx2_psrav_d;
3270 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3271 IID = Intrinsic::x86_avx2_psrav_d_256;
3272 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3273 IID = Intrinsic::x86_avx512_psrav_w_128;
3274 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3275 IID = Intrinsic::x86_avx512_psrav_w_256;
3276 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3277 IID = Intrinsic::x86_avx512_psrav_w_512;
3278 else
3279 llvm_unreachable("Unexpected size");
3280 } else if (Name.endswith(".128")) {
3281 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3282 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3283 : Intrinsic::x86_sse2_psra_d;
3284 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3285 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3286 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3287 Intrinsic::x86_avx512_psra_q_128;
3288 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3289 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3290 : Intrinsic::x86_sse2_psra_w;
3291 else
3292 llvm_unreachable("Unexpected size");
3293 } else if (Name.endswith(".256")) {
3294 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3295 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3296 : Intrinsic::x86_avx2_psra_d;
3297 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3298 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3299 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3300 Intrinsic::x86_avx512_psra_q_256;
3301 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3302 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3303 : Intrinsic::x86_avx2_psra_w;
3304 else
3305 llvm_unreachable("Unexpected size");
3306 } else {
3307 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3308 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3309 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3310 Intrinsic::x86_avx512_psra_d_512;
3311 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
3312 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3313 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3314 Intrinsic::x86_avx512_psra_q_512;
3315 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3316 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3317 : Intrinsic::x86_avx512_psra_w_512;
3318 else
3319 llvm_unreachable("Unexpected size");
3320 }
3321
3322 Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
3323 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
3324 Rep = upgradeMaskedMove(Builder, *CI);
3325 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
3326 Rep = UpgradeMaskToInt(Builder, *CI);
3327 } else if (IsX86 && Name.endswith(".movntdqa")) {
3328 Module *M = F->getParent();
3329 MDNode *Node = MDNode::get(
3330 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3331
3332 Value *Ptr = CI->getArgOperand(0);
3333
3334 // Convert the type of the pointer to a pointer to the stored type.
3335 Value *BC = Builder.CreateBitCast(
3336 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3337 LoadInst *LI = Builder.CreateAlignedLoad(
3338 CI->getType(), BC,
3339 Align(CI->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
3340 LI->setMetadata(M->getMDKindID("nontemporal"), Node);
3341 Rep = LI;
3342 } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
3343 Name.startswith("fma.vfmsub.") ||
3344 Name.startswith("fma.vfnmadd.") ||
3345 Name.startswith("fma.vfnmsub."))) {
3346 bool NegMul = Name[6] == 'n';
3347 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3348 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
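// e.g. "fma.vfnmsub.sd" has Name[6] == 'n', Name[8] == 's', and
// Name[12] == 's'.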
3349
3350 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3351 CI->getArgOperand(2) };
3352
3353 if (IsScalar) {
3354 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3355 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3356 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3357 }
3358
3359 if (NegMul && !IsScalar)
3360 Ops[0] = Builder.CreateFNeg(Ops[0]);
3361 if (NegMul && IsScalar)
3362 Ops[1] = Builder.CreateFNeg(Ops[1]);
3363 if (NegAcc)
3364 Ops[2] = Builder.CreateFNeg(Ops[2]);
3365
3366 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3367 Intrinsic::fma,
3368 Ops[0]->getType()),
3369 Ops);
3370
3371 if (IsScalar)
3372 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3373 (uint64_t)0);
3374 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
3375 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3376 CI->getArgOperand(2) };
3377
3378 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3379 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3380 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3381
3382 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3383 Intrinsic::fma,
3384 Ops[0]->getType()),
3385 Ops);
3386
3387 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3388 Rep, (uint64_t)0);
3389 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
3390 Name.startswith("avx512.maskz.vfmadd.s") ||
3391 Name.startswith("avx512.mask3.vfmadd.s") ||
3392 Name.startswith("avx512.mask3.vfmsub.s") ||
3393 Name.startswith("avx512.mask3.vfnmsub.s"))) {
3394 bool IsMask3 = Name[11] == '3';
3395 bool IsMaskZ = Name[11] == 'z';
3396 // Drop the "avx512.mask." to make it easier.
3397 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
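// ("avx512.mask3." and "avx512.maskz." are 13 characters; "avx512.mask."
// is 12.)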
3398 bool NegMul = Name[2] == 'n';
3399 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3400
3401 Value *A = CI->getArgOperand(0);
3402 Value *B = CI->getArgOperand(1);
3403 Value *C = CI->getArgOperand(2);
3404
3405 if (NegMul && (IsMask3 || IsMaskZ))
3406 A = Builder.CreateFNeg(A);
3407 if (NegMul && !(IsMask3 || IsMaskZ))
3408 B = Builder.CreateFNeg(B);
3409 if (NegAcc)
3410 C = Builder.CreateFNeg(C);
3411
3412 A = Builder.CreateExtractElement(A, (uint64_t)0);
3413 B = Builder.CreateExtractElement(B, (uint64_t)0);
3414 C = Builder.CreateExtractElement(C, (uint64_t)0);
3415
3416 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3417 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3418 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3419
3420 Intrinsic::ID IID;
3421 if (Name.back() == 'd')
3422 IID = Intrinsic::x86_avx512_vfmadd_f64;
3423 else
3424 IID = Intrinsic::x86_avx512_vfmadd_f32;
3425 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3426 Rep = Builder.CreateCall(FMA, Ops);
3427 } else {
3428 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3429 Intrinsic::fma,
3430 A->getType());
3431 Rep = Builder.CreateCall(FMA, { A, B, C });
3432 }
3433
3434 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3435 IsMask3 ? C : A;
3436
3437 // For Mask3 with NegAcc, we need to create a new extractelement that
3438 // avoids the negation above.
3439 if (NegAcc && IsMask3)
3440 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3441 (uint64_t)0);
3442
3443 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
3444 Rep, PassThru);
3445 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3446 Rep, (uint64_t)0);
3447 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") ||
3448 Name.startswith("avx512.mask.vfnmadd.p") ||
3449 Name.startswith("avx512.mask.vfnmsub.p") ||
3450 Name.startswith("avx512.mask3.vfmadd.p") ||
3451 Name.startswith("avx512.mask3.vfmsub.p") ||
3452 Name.startswith("avx512.mask3.vfnmsub.p") ||
3453 Name.startswith("avx512.maskz.vfmadd.p"))) {
3454 bool IsMask3 = Name[11] == '3';
3455 bool IsMaskZ = Name[11] == 'z';
3456 // Drop the "avx512.mask." to make it easier.
3457 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3458 bool NegMul = Name[2] == 'n';
3459 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3460
3461 Value *A = CI->getArgOperand(0);
3462 Value *B = CI->getArgOperand(1);
3463 Value *C = CI->getArgOperand(2);
3464
3465 if (NegMul && (IsMask3 || IsMaskZ))
3466 A = Builder.CreateFNeg(A);
3467 if (NegMul && !(IsMask3 || IsMaskZ))
3468 B = Builder.CreateFNeg(B);
3469 if (NegAcc)
3470 C = Builder.CreateFNeg(C);
3471
3472 if (CI->arg_size() == 5 &&
3473 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3474 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3475 Intrinsic::ID IID;
3476 // Check the character before ".512" in string.
3477 if (Name[Name.size()-5] == 's')
3478 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3479 else
3480 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3481
3482 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3483 { A, B, C, CI->getArgOperand(4) });
3484 } else {
3485 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3486 Intrinsic::fma,
3487 A->getType());
3488 Rep = Builder.CreateCall(FMA, { A, B, C });
3489 }
3490
3491 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3492 IsMask3 ? CI->getArgOperand(2) :
3493 CI->getArgOperand(0);
3494
3495 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3496 } else if (IsX86 && Name.startswith("fma.vfmsubadd.p")) {
3497 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3498 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3499 Intrinsic::ID IID;
3500 if (VecWidth == 128 && EltWidth == 32)
3501 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3502 else if (VecWidth == 256 && EltWidth == 32)
3503 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3504 else if (VecWidth == 128 && EltWidth == 64)
3505 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3506 else if (VecWidth == 256 && EltWidth == 64)
3507 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3508 else
3509 llvm_unreachable("Unexpected intrinsic");
3510
3511 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3512 CI->getArgOperand(2) };
3513 Ops[2] = Builder.CreateFNeg(Ops[2]);
3514 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3515 Ops);
3516 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
3517 Name.startswith("avx512.mask3.vfmaddsub.p") ||
3518 Name.startswith("avx512.maskz.vfmaddsub.p") ||
3519 Name.startswith("avx512.mask3.vfmsubadd.p"))) {
3520 bool IsMask3 = Name[11] == '3';
3521 bool IsMaskZ = Name[11] == 'z';
3522 // Drop the "avx512.mask." to make it easier.
3523 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3524 bool IsSubAdd = Name[3] == 's';
3525 if (CI->arg_size() == 5) {
3526 Intrinsic::ID IID;
3527 // Check the character before ".512" in string.
3528 if (Name[Name.size()-5] == 's')
3529 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
3530 else
3531 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
3532
3533 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3534 CI->getArgOperand(2), CI->getArgOperand(4) };
3535 if (IsSubAdd)
3536 Ops[2] = Builder.CreateFNeg(Ops[2]);
3537
3538 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3539 Ops);
3540 } else {
3541 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3542
3543 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3544 CI->getArgOperand(2) };
3545
3546 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
3547 Ops[0]->getType());
3548 Value *Odd = Builder.CreateCall(FMA, Ops);
3549 Ops[2] = Builder.CreateFNeg(Ops[2]);
3550 Value *Even = Builder.CreateCall(FMA, Ops);
3551
3552 if (IsSubAdd)
3553 std::swap(Even, Odd);
3554
3555 SmallVector<int, 32> Idxs(NumElts);
3556 for (int i = 0; i != NumElts; ++i)
3557 Idxs[i] = i + (i % 2) * NumElts;
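// e.g. for 8 elements this is {0, 9, 2, 11, 4, 13, 6, 15}: even lanes
// come from Even, odd lanes from Odd.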
3558
3559 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
3560 }
3561
3562 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3563 IsMask3 ? CI->getArgOperand(2) :
3564 CI->getArgOperand(0);
3565
3566 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3567 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
3568 Name.startswith("avx512.maskz.pternlog."))) {
3569 bool ZeroMask = Name[11] == 'z';
3570 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3571 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3572 Intrinsic::ID IID;
3573 if (VecWidth == 128 && EltWidth == 32)
3574 IID = Intrinsic::x86_avx512_pternlog_d_128;
3575 else if (VecWidth == 256 && EltWidth == 32)
3576 IID = Intrinsic::x86_avx512_pternlog_d_256;
3577 else if (VecWidth == 512 && EltWidth == 32)
3578 IID = Intrinsic::x86_avx512_pternlog_d_512;
3579 else if (VecWidth == 128 && EltWidth == 64)
3580 IID = Intrinsic::x86_avx512_pternlog_q_128;
3581 else if (VecWidth == 256 && EltWidth == 64)
3582 IID = Intrinsic::x86_avx512_pternlog_q_256;
3583 else if (VecWidth == 512 && EltWidth == 64)
3584 IID = Intrinsic::x86_avx512_pternlog_q_512;
3585 else
3586 llvm_unreachable("Unexpected intrinsic");
3587
3588 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3589 CI->getArgOperand(2), CI->getArgOperand(3) };
3590 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3591 Args);
3592 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3593 : CI->getArgOperand(0);
3594 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
3595 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
3596 Name.startswith("avx512.maskz.vpmadd52"))) {
3597 bool ZeroMask = Name[11] == 'z';
3598 bool High = Name[20] == 'h' || Name[21] == 'h';
3599 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3600 Intrinsic::ID IID;
3601 if (VecWidth == 128 && !High)
3602 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
3603 else if (VecWidth == 256 && !High)
3604 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
3605 else if (VecWidth == 512 && !High)
3606 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
3607 else if (VecWidth == 128 && High)
3608 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
3609 else if (VecWidth == 256 && High)
3610 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
3611 else if (VecWidth == 512 && High)
3612 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
3613 else
3614 llvm_unreachable("Unexpected intrinsic");
3615
3616 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
3617 CI->getArgOperand(2) };
3618 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3619 Args);
3620 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3621 : CI->getArgOperand(0);
3622 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3623 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
3624 Name.startswith("avx512.mask.vpermt2var.") ||
3625 Name.startswith("avx512.maskz.vpermt2var."))) {
3626 bool ZeroMask = Name[11] == 'z';
3627 bool IndexForm = Name[17] == 'i';
3628 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
3629 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
3630 Name.startswith("avx512.maskz.vpdpbusd.") ||
3631 Name.startswith("avx512.mask.vpdpbusds.") ||
3632 Name.startswith("avx512.maskz.vpdpbusds."))) {
3633 bool ZeroMask = Name[11] == 'z';
3634 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3635 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3636 Intrinsic::ID IID;
3637 if (VecWidth == 128 && !IsSaturating)
3638 IID = Intrinsic::x86_avx512_vpdpbusd_128;
3639 else if (VecWidth == 256 && !IsSaturating)
3640 IID = Intrinsic::x86_avx512_vpdpbusd_256;
3641 else if (VecWidth == 512 && !IsSaturating)
3642 IID = Intrinsic::x86_avx512_vpdpbusd_512;
3643 else if (VecWidth == 128 && IsSaturating)
3644 IID = Intrinsic::x86_avx512_vpdpbusds_128;
3645 else if (VecWidth == 256 && IsSaturating)
3646 IID = Intrinsic::x86_avx512_vpdpbusds_256;
3647 else if (VecWidth == 512 && IsSaturating)
3648 IID = Intrinsic::x86_avx512_vpdpbusds_512;
3649 else
3650 llvm_unreachable("Unexpected intrinsic");
3651
3652 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3653 CI->getArgOperand(2) };
3654 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3655 Args);
3656 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3657 : CI->getArgOperand(0);
3658 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3659 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
3660 Name.startswith("avx512.maskz.vpdpwssd.") ||
3661 Name.startswith("avx512.mask.vpdpwssds.") ||
3662 Name.startswith("avx512.maskz.vpdpwssds."))) {
3663 bool ZeroMask = Name[11] == 'z';
3664 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
3665 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3666 Intrinsic::ID IID;
3667 if (VecWidth == 128 && !IsSaturating)
3668 IID = Intrinsic::x86_avx512_vpdpwssd_128;
3669 else if (VecWidth == 256 && !IsSaturating)
3670 IID = Intrinsic::x86_avx512_vpdpwssd_256;
3671 else if (VecWidth == 512 && !IsSaturating)
3672 IID = Intrinsic::x86_avx512_vpdpwssd_512;
3673 else if (VecWidth == 128 && IsSaturating)
3674 IID = Intrinsic::x86_avx512_vpdpwssds_128;
3675 else if (VecWidth == 256 && IsSaturating)
3676 IID = Intrinsic::x86_avx512_vpdpwssds_256;
3677 else if (VecWidth == 512 && IsSaturating)
3678 IID = Intrinsic::x86_avx512_vpdpwssds_512;
3679 else
3680 llvm_unreachable("Unexpected intrinsic");
3681
3682 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3683 CI->getArgOperand(2) };
3684 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
3685 Args);
3686 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
3687 : CI->getArgOperand(0);
3688 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3689 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
3690 Name == "addcarry.u32" || Name == "addcarry.u64" ||
3691 Name == "subborrow.u32" || Name == "subborrow.u64")) {
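// Dispatch on the first character ('a' for add, 's' for sub) and the
// last character ('2' for .u32, '4' for .u64).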
3692 Intrinsic::ID IID;
3693 if (Name[0] == 'a' && Name.back() == '2')
3694 IID = Intrinsic::x86_addcarry_32;
3695 else if (Name[0] == 'a' && Name.back() == '4')
3696 IID = Intrinsic::x86_addcarry_64;
3697 else if (Name[0] == 's' && Name.back() == '2')
3698 IID = Intrinsic::x86_subborrow_32;
3699 else if (Name[0] == 's' && Name.back() == '4')
3700 IID = Intrinsic::x86_subborrow_64;
3701 else
3702 llvm_unreachable("Unexpected intrinsic");
3703
3704 // Make a call with 3 operands.
3705 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3706 CI->getArgOperand(2)};
3707 Value *NewCall = Builder.CreateCall(
3708 Intrinsic::getDeclaration(CI->getModule(), IID),
3709 Args);
3710
3711 // Extract the second result and store it.
3712 Value *Data = Builder.CreateExtractValue(NewCall, 1);
3713 // Cast the pointer to the right type.
3714 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
3715 llvm::PointerType::getUnqual(Data->getType()));
3716 Builder.CreateAlignedStore(Data, Ptr, Align(1));
3717 // Replace the original call result with the first result of the new call.
3718 Value *CF = Builder.CreateExtractValue(NewCall, 0);
3719
3720 CI->replaceAllUsesWith(CF);
3721 Rep = nullptr;
3722 } else if (IsX86 && Name.startswith("avx512.mask.") &&
3723 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
3724 // Rep will be updated by the call in the condition.
3725 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
3726 Value *Arg = CI->getArgOperand(0);
3727 Value *Neg = Builder.CreateNeg(Arg, "neg");
3728 Value *Cmp = Builder.CreateICmpSGE(
3729 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
3730 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
3731 } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
3732 Name.startswith("atomic.load.add.f64.p"))) {
3733 Value *Ptr = CI->getArgOperand(0);
3734 Value *Val = CI->getArgOperand(1);
3735 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
3736 AtomicOrdering::SequentiallyConsistent);
3737 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
3738 Name == "max.ui" || Name == "max.ull")) {
3739 Value *Arg0 = CI->getArgOperand(0);
3740 Value *Arg1 = CI->getArgOperand(1);
3741 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3742 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
3743 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
3744 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
3745 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
3746 Name == "min.ui" || Name == "min.ull")) {
3747 Value *Arg0 = CI->getArgOperand(0);
3748 Value *Arg1 = CI->getArgOperand(1);
3749 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
3750 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
3751 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
3752 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
3753 } else if (IsNVVM && Name == "clz.ll") {
3754 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
3755 Value *Arg = CI->getArgOperand(0);
3756 Value *Ctlz = Builder.CreateCall(
3757 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
3758 {Arg->getType()}),
3759 {Arg, Builder.getFalse()}, "ctlz");
3760 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
3761 } else if (IsNVVM && Name == "popc.ll") {
3762 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
3763 // i64.
3764 Value *Arg = CI->getArgOperand(0);
3765 Value *Popc = Builder.CreateCall(
3766 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
3767 {Arg->getType()}),
3768 Arg, "ctpop");
3769 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
3770 } else if (IsNVVM && Name == "h2f") {
3771 Rep = Builder.CreateCall(Intrinsic::getDeclaration(
3772 F->getParent(), Intrinsic::convert_from_fp16,
3773 {Builder.getFloatTy()}),
3774 CI->getArgOperand(0), "h2f");
3775 } else if (IsARM) {
3776 Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder);
3777 } else {
3778 llvm_unreachable("Unknown function for CallInst upgrade.");
3779 }
3780
3781 if (Rep)
3782 CI->replaceAllUsesWith(Rep);
3783 CI->eraseFromParent();
3784 return;
3785 }
3786
3787 const auto &DefaultCase = [&NewFn, &CI]() -> void {
3788 // Handle generic mangling change, but nothing else
3789 assert(
3790 (CI->getCalledFunction()->getName() != NewFn->getName()) &&
3791 "Unknown function for CallInst upgrade and isn't just a name change");
3792 CI->setCalledFunction(NewFn);
3793 };
3794 CallInst *NewCall = nullptr;
3795 switch (NewFn->getIntrinsicID()) {
3796 default: {
3797 DefaultCase();
3798 return;
3799 }
3800 case Intrinsic::arm_neon_vld1:
3801 case Intrinsic::arm_neon_vld2:
3802 case Intrinsic::arm_neon_vld3:
3803 case Intrinsic::arm_neon_vld4:
3804 case Intrinsic::arm_neon_vld2lane:
3805 case Intrinsic::arm_neon_vld3lane:
3806 case Intrinsic::arm_neon_vld4lane:
3807 case Intrinsic::arm_neon_vst1:
3808 case Intrinsic::arm_neon_vst2:
3809 case Intrinsic::arm_neon_vst3:
3810 case Intrinsic::arm_neon_vst4:
3811 case Intrinsic::arm_neon_vst2lane:
3812 case Intrinsic::arm_neon_vst3lane:
3813 case Intrinsic::arm_neon_vst4lane: {
3814 SmallVector<Value *, 4> Args(CI->args());
3815 NewCall = Builder.CreateCall(NewFn, Args);
3816 break;
3817 }
3818
3819 case Intrinsic::arm_neon_bfdot:
3820 case Intrinsic::arm_neon_bfmmla:
3821 case Intrinsic::arm_neon_bfmlalb:
3822 case Intrinsic::arm_neon_bfmlalt:
3823 case Intrinsic::aarch64_neon_bfdot:
3824 case Intrinsic::aarch64_neon_bfmmla:
3825 case Intrinsic::aarch64_neon_bfmlalb:
3826 case Intrinsic::aarch64_neon_bfmlalt: {
3827 SmallVector<Value *, 3> Args;
3828 assert(CI->arg_size() == 3 &&
3829 "Mismatch between function args and call args");
3830 size_t OperandWidth =
3831 CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
3832 assert((OperandWidth == 64 || OperandWidth == 128) &&
3833 "Unexpected operand width");
3834 Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
3835 auto Iter = CI->args().begin();
3836 Args.push_back(*Iter++);
3837 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
3838 Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
3839 NewCall = Builder.CreateCall(NewFn, Args);
3840 break;
3841 }
3842
3843 case Intrinsic::bitreverse:
3844 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
3845 break;
3846
3847 case Intrinsic::ctlz:
3848 case Intrinsic::cttz:
3849 assert(CI->arg_size() == 1 &&
3850 "Mismatch between function args and call args");
3851 NewCall =
3852 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
3853 break;
3854
3855 case Intrinsic::objectsize: {
3856 Value *NullIsUnknownSize =
3857 CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
3858 Value *Dynamic =
3859 CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
3860 NewCall = Builder.CreateCall(
3861 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
3862 break;
3863 }
3864
3865 case Intrinsic::ctpop:
3866 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
3867 break;
3868
3869 case Intrinsic::convert_from_fp16:
3870 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
3871 break;
3872
3873 case Intrinsic::dbg_value:
3874 // Upgrade from the old version that had an extra offset argument.
3875 assert(CI->arg_size() == 4);
3876 // Drop nonzero offsets instead of attempting to upgrade them.
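// Old form (illustrative, assumed from the arg_size() == 4 check above):
//   llvm.dbg.value(metadata %v, i64 0, metadata !var, metadata !expr)
// The upgraded form simply drops the i64 offset operand.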
3877 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
3878 if (Offset->isZeroValue()) {
3879 NewCall = Builder.CreateCall(
3880 NewFn,
3881 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
3882 break;
3883 }
3884 CI->eraseFromParent();
3885 return;
3886
3887 case Intrinsic::ptr_annotation:
3888 // Upgrade from versions that lacked the annotation attribute argument.
3889 assert(CI->arg_size() == 4 &&
3890 "Before LLVM 12.0 this intrinsic took four arguments");
3891 // Create a new call with an added null annotation attribute argument.
3892 NewCall = Builder.CreateCall(
3893 NewFn,
3894 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
3895 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
3896 NewCall->takeName(CI);
3897 CI->replaceAllUsesWith(NewCall);
3898 CI->eraseFromParent();
3899 return;
3900
3901 case Intrinsic::var_annotation:
3902 // Upgrade from versions that lacked the annotation attribute argument.
3903 assert(CI->arg_size() == 4 &&
3904 "Before LLVM 12.0 this intrinsic took four arguments");
3905 // Create a new call with an added null annotation attribute argument.
3906 NewCall = Builder.CreateCall(
Value stored to 'NewCall' is never read
3907 NewFn,
3908 {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
3909 CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
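// This is the dead store the analyzer reports: unlike the ptr_annotation
// case above, var_annotation returns void, so the call's result is never
// consumed. A plausible fix is to drop the assignment and emit the call
// purely for its side effect.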
3910 CI->eraseFromParent();
3911 return;
3912
3913 case Intrinsic::x86_xop_vfrcz_ss:
3914 case Intrinsic::x86_xop_vfrcz_sd:
3915 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
3916 break;
3917
3918 case Intrinsic::x86_xop_vpermil2pd:
3919 case Intrinsic::x86_xop_vpermil2ps:
3920 case Intrinsic::x86_xop_vpermil2pd_256:
3921 case Intrinsic::x86_xop_vpermil2ps_256: {
3922 SmallVector<Value *, 4> Args(CI->args());
3923 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
3924 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
3925 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
3926 NewCall = Builder.CreateCall(NewFn, Args);
3927 break;
3928 }
3929
3930 case Intrinsic::x86_sse41_ptestc:
3931 case Intrinsic::x86_sse41_ptestz:
3932 case Intrinsic::x86_sse41_ptestnzc: {
3933 // The arguments for these intrinsics used to be v4f32 and were changed
3934 // to v2i64. This is purely a nop, since those are bitwise intrinsics.
3935 // So, the only thing required is a bitcast for both arguments.
3936 // First, check the arguments have the old type.
3937 Value *Arg0 = CI->getArgOperand(0);
3938 if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
3939 return;
3940
3941 // Old intrinsic, add bitcasts
3942 Value *Arg1 = CI->getArgOperand(1);
3943
3944 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
3945
3946 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
3947 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
3948
3949 NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
3950 break;
3951 }
3952
3953 case Intrinsic::x86_rdtscp: {
3954 // This used to take one argument. If we have no arguments, it is already
3955 // upgraded.
3956 if (CI->getNumOperands() == 0)
3957 return;
3958
3959 NewCall = Builder.CreateCall(NewFn);
3960 // Extract the second result and store it.
3961 Value *Data = Builder.CreateExtractValue(NewCall, 1);
3962 // Cast the pointer to the right type.
3963 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
3964 llvm::PointerType::getUnqual(Data->getType()));
3965 Builder.CreateAlignedStore(Data, Ptr, Align(1));
3966 // Replace the original call result with the first result of the new call.
3967 Value *TSC = Builder.CreateExtractValue(NewCall, 0);
3968
3969 NewCall->takeName(CI);
3970 CI->replaceAllUsesWith(TSC);
3971 CI->eraseFromParent();
3972 return;
3973 }
3974
3975 case Intrinsic::x86_sse41_insertps:
3976 case Intrinsic::x86_sse41_dppd:
3977 case Intrinsic::x86_sse41_dpps:
3978 case Intrinsic::x86_sse41_mpsadbw:
3979 case Intrinsic::x86_avx_dp_ps_256:
3980 case Intrinsic::x86_avx2_mpsadbw: {
3981 // Need to truncate the last argument from i32 to i8 -- this argument models
3982 // an inherently 8-bit immediate operand to these x86 instructions.
3983 SmallVector<Value *, 4> Args(CI->args());
3984
3985 // Replace the last argument with a trunc.
3986 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
3987 NewCall = Builder.CreateCall(NewFn, Args);
3988 break;
3989 }
3990
3991 case Intrinsic::x86_avx512_mask_cmp_pd_128:
3992 case Intrinsic::x86_avx512_mask_cmp_pd_256:
3993 case Intrinsic::x86_avx512_mask_cmp_pd_512:
3994 case Intrinsic::x86_avx512_mask_cmp_ps_128:
3995 case Intrinsic::x86_avx512_mask_cmp_ps_256:
3996 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
3997 SmallVector<Value *, 4> Args(CI->args());
3998 unsigned NumElts =
3999 cast<FixedVectorType>(Args[0]->getType())->getNumElements();
4000 Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
4001
4002 NewCall = Builder.CreateCall(NewFn, Args);
4003 Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
4004
4005 NewCall->takeName(CI);
4006 CI->replaceAllUsesWith(Res);
4007 CI->eraseFromParent();
4008 return;
4009 }
4010
4011 case Intrinsic::thread_pointer: {
4012 NewCall = Builder.CreateCall(NewFn, {});
4013 break;
4014 }
4015
4016 case Intrinsic::invariant_start:
4017 case Intrinsic::invariant_end: {
4018 SmallVector<Value *, 4> Args(CI->args());
4019 NewCall = Builder.CreateCall(NewFn, Args);
4020 break;
4021 }
4022 case Intrinsic::masked_load:
4023 case Intrinsic::masked_store:
4024 case Intrinsic::masked_gather:
4025 case Intrinsic::masked_scatter: {
4026 SmallVector<Value *, 4> Args(CI->args());
4027 NewCall = Builder.CreateCall(NewFn, Args);
4028 NewCall->copyMetadata(*CI);
4029 break;
4030 }
4031
4032 case Intrinsic::memcpy:
4033 case Intrinsic::memmove:
4034 case Intrinsic::memset: {
4035 // We have to make sure that the call signature is what we're expecting.
4036 // We only want to change the old signatures by removing the alignment arg:
4037 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1)
4038 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1)
4039 // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
4040 // -> @llvm.memset...(i8*, i8, i[32|64], i1)
4041 // Note: i8*'s in the above can be any pointer type
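// Illustrative example (assumed IR, not taken from this report):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 4, i1 false)
// becomes
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s, i64 %n, i1 false)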
4042 if (CI->arg_size() != 5) {
4043 DefaultCase();
4044 return;
4045 }
4046 // Remove alignment argument (3), and add alignment attributes to the
4047 // dest/src pointers.
4048 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
4049 CI->getArgOperand(2), CI->getArgOperand(4)};
4050 NewCall = Builder.CreateCall(NewFn, Args);
4051 auto *MemCI = cast<MemIntrinsic>(NewCall);
4052 // All mem intrinsics support dest alignment.
4053 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
4054 MemCI->setDestAlignment(Align->getMaybeAlignValue());
4055 // Memcpy/Memmove also support source alignment.
4056 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
4057 MTI->setSourceAlignment(Align->getMaybeAlignValue());
4058 break;
4059 }
4060 }
4061 assert(NewCall && "Should have either set this variable or returned through "
4062 "the default case");
4063 NewCall->takeName(CI);
4064 CI->replaceAllUsesWith(NewCall);
4065 CI->eraseFromParent();
4066}
4067
4068void llvm::UpgradeCallsToIntrinsic(Function *F) {
4069 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
4070
4071 // Check if this function should be upgraded and get the replacement function
4072 // if there is one.
4073 Function *NewFn;
4074 if (UpgradeIntrinsicFunction(F, NewFn)) {
4075 // Replace all users of the old function with the new function or new
4076 // instructions. This is not a range loop because the call is deleted.
4077 for (User *U : make_early_inc_range(F->users()))
4078 if (CallInst *CI = dyn_cast<CallInst>(U))
4079 UpgradeIntrinsicCall(CI, NewFn);
4080
4081 // Remove old function, no longer used, from the module.
4082 F->eraseFromParent();
4083 }
4084}
4085
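// A sketch of the upgrade performed by UpgradeTBAANode below (assumed
// metadata, not from this report): an old scalar tag such as
//   !1 = !{!"int", !0}
// becomes the struct-path access tag !{!1, !1, i64 0}.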
4086MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
4087 // Check if the tag uses struct-path aware TBAA format.
4088 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
4089 return &MD;
4090
4091 auto &Context = MD.getContext();
4092 if (MD.getNumOperands() == 3) {
4093 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
4094 MDNode *ScalarType = MDNode::get(Context, Elts);
4095 // Create an MDNode <ScalarType, ScalarType, offset 0, const>
4096 Metadata *Elts2[] = {ScalarType, ScalarType,
4097 ConstantAsMetadata::get(
4098 Constant::getNullValue(Type::getInt64Ty(Context))),
4099 MD.getOperand(2)};
4100 return MDNode::get(Context, Elts2);
4101 }
4102 // Create an MDNode <MD, MD, offset 0>
4103 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
4104 Type::getInt64Ty(Context)))};
4105 return MDNode::get(Context, Elts);
4106}
4107
4108Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
4109 Instruction *&Temp) {
4110 if (Opc != Instruction::BitCast)
4111 return nullptr;
4112
4113 Temp = nullptr;
4114 Type *SrcTy = V->getType();
4115 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4116 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4117 LLVMContext &Context = V->getContext();
4118
4119 // We have no information about the target data layout, so we assume that
4120 // the maximum pointer size is 64 bits.
4121 Type *MidTy = Type::getInt64Ty(Context);
4122 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
4123
4124 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
4125 }
4126
4127 return nullptr;
4128}
4129
4130Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
4131 if (Opc != Instruction::BitCast)
4132 return nullptr;
4133
4134 Type *SrcTy = C->getType();
4135 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
4136 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
4137 LLVMContext &Context = C->getContext();
4138
4139 // We have no information about the target data layout, so we assume that
4140 // the maximum pointer size is 64 bits.
4141 Type *MidTy = Type::getInt64Ty(Context);
4142
4143 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
4144 DestTy);
4145 }
4146
4147 return nullptr;
4148}
4149
4150 /// Check the debug info version number; if it is out-dated, drop the debug
4151 /// info. Return true if the module is modified.
4152bool llvm::UpgradeDebugInfo(Module &M) {
4153 unsigned Version = getDebugMetadataVersionFromModule(M);
4154 if (Version == DEBUG_METADATA_VERSION) {
4155 bool BrokenDebugInfo = false;
4156 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
4157 report_fatal_error("Broken module found, compilation aborted!");
4158 if (!BrokenDebugInfo)
4159 // Everything is ok.
4160 return false;
4161 else {
4162 // Diagnose malformed debug info.
4163 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
4164 M.getContext().diagnose(Diag);
4165 }
4166 }
4167 bool Modified = StripDebugInfo(M);
4168 if (Modified && Version != DEBUG_METADATA_VERSION) {
4169 // Diagnose a version mismatch.
4170 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
4171 M.getContext().diagnose(DiagVersion);
4172 }
4173 return Modified;
4174}
4175
4176 /// This checks for the ObjC retain/release marker, which should be upgraded.
4177 /// It returns true if the module is modified.
4178static bool UpgradeRetainReleaseMarker(Module &M) {
4179 bool Changed = false;
4180 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
4181 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
4182 if (ModRetainReleaseMarker) {
4183 MDNode *Op = ModRetainReleaseMarker->getOperand(0);
4184 if (Op) {
4185 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
4186 if (ID) {
4187 SmallVector<StringRef, 4> ValueComp;
4188 ID->getString().split(ValueComp, "#");
4189 if (ValueComp.size() == 2) {
4190 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
4191 ID = MDString::get(M.getContext(), NewValue);
4192 }
4193 M.addModuleFlag(Module::Error, MarkerKey, ID);
4194 M.eraseNamedMetadata(ModRetainReleaseMarker);
4195 Changed = true;
4196 }
4197 }
4198 }
4199 return Changed;
4200}
4201
4202void llvm::UpgradeARCRuntime(Module &M) {
4203 // This lambda converts normal calls to ARC runtime functions into
4204 // intrinsic calls.
4205 auto UpgradeToIntrinsic = [&](const char *OldFunc,
4206 llvm::Intrinsic::ID IntrinsicFunc) {
4207 Function *Fn = M.getFunction(OldFunc);
4208
4209 if (!Fn)
4210 return;
4211
4212 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
4213
4214 for (User *U : make_early_inc_range(Fn->users())) {
4215 CallInst *CI = dyn_cast<CallInst>(U);
4216 if (!CI || CI->getCalledFunction() != Fn)
4217 continue;
4218
4219 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
4220 FunctionType *NewFuncTy = NewFn->getFunctionType();
4221 SmallVector<Value *, 2> Args;
4222
4223 // Don't upgrade the intrinsic if it's not valid to bitcast the return
4224 // value to the return type of the old function.
4225 if (NewFuncTy->getReturnType() != CI->getType() &&
4226 !CastInst::castIsValid(Instruction::BitCast, CI,
4227 NewFuncTy->getReturnType()))
4228 continue;
4229
4230 bool InvalidCast = false;
4231
4232 for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
4233 Value *Arg = CI->getArgOperand(I);
4234
4235 // Bitcast argument to the parameter type of the new function if it's
4236 // not a variadic argument.
4237 if (I < NewFuncTy->getNumParams()) {
4238 // Don't upgrade the intrinsic if it's not valid to bitcast the argument
4239 // to the parameter type of the new function.
4240 if (!CastInst::castIsValid(Instruction::BitCast, Arg,
4241 NewFuncTy->getParamType(I))) {
4242 InvalidCast = true;
4243 break;
4244 }
4245 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
4246 }
4247 Args.push_back(Arg);
4248 }
4249
4250 if (InvalidCast)
4251 continue;
4252
4253 // Create a call instruction that calls the new function.
4254 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
4255 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
4256 NewCall->takeName(CI);
4257
4258 // Bitcast the return value back to the type of the old call.
4259 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
4260
4261 if (!CI->use_empty())
4262 CI->replaceAllUsesWith(NewRetVal);
4263 CI->eraseFromParent();
4264 }
4265
4266 if (Fn->use_empty())
4267 Fn->eraseFromParent();
4268 };
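// Illustrative effect (assumed IR, not from this report): a runtime call
//   %r = call i8* @objc_retain(i8* %x)
// is rewritten to the intrinsic form
//   %r = call i8* @llvm.objc.retain(i8* %x)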
4269
4270 // Unconditionally convert a call to "clang.arc.use" to a call to
4271 // "llvm.objc.clang.arc.use".
4272 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
4273
4274 // Upgrade the retain/release marker. If there is no need to upgrade
4275 // the marker, that means the module is either already new enough to contain
4276 // the new intrinsics or it is not ARC, so the runtime calls need no upgrade.
4277 if (!UpgradeRetainReleaseMarker(M))
4278 return;
4279
4280 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
4281 {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
4282 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
4283 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
4284 {"objc_autoreleaseReturnValue",
4285 llvm::Intrinsic::objc_autoreleaseReturnValue},
4286 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
4287 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
4288 {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
4289 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
4290 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
4291 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
4292 {"objc_release", llvm::Intrinsic::objc_release},
4293 {"objc_retain", llvm::Intrinsic::objc_retain},
4294 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
4295 {"objc_retainAutoreleaseReturnValue",
4296 llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
4297 {"objc_retainAutoreleasedReturnValue",
4298 llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
4299 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
4300 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
4301 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
4302 {"objc_unsafeClaimAutoreleasedReturnValue",
4303 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
4304 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
4305 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
4306 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
4307 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
4308 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
4309 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
4310 {"objc_arc_annotation_topdown_bbstart",
4311 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
4312 {"objc_arc_annotation_topdown_bbend",
4313 llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
4314 {"objc_arc_annotation_bottomup_bbstart",
4315 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
4316 {"objc_arc_annotation_bottomup_bbend",
4317 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
4318
4319 for (auto &I : RuntimeFuncs)
4320 UpgradeToIntrinsic(I.first, I.second);
4321}
4322
4323bool llvm::UpgradeModuleFlags(Module &M) {
4324 NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
4325 if (!ModFlags)
4326 return false;
4327
4328 bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
4329 bool HasSwiftVersionFlag = false;
4330 uint8_t SwiftMajorVersion, SwiftMinorVersion;
4331 uint32_t SwiftABIVersion;
4332 auto Int8Ty = Type::getInt8Ty(M.getContext());
4333 auto Int32Ty = Type::getInt32Ty(M.getContext());
4334
4335 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
4336 MDNode *Op = ModFlags->getOperand(I);
4337 if (Op->getNumOperands() != 3)
4338 continue;
4339 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
4340 if (!ID)
4341 continue;
4342 if (ID->getString() == "Objective-C Image Info Version")
4343 HasObjCFlag = true;
4344 if (ID->getString() == "Objective-C Class Properties")
4345 HasClassProperties = true;
4346 // Upgrade PIC/PIE module flags. The module flag behavior for these two
4347 // fields was Error and is now Max.
4348 if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
4349 if (auto *Behavior =
4350 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
4351 if (Behavior->getLimitedValue() == Module::Error) {
4352 Type *Int32Ty = Type::getInt32Ty(M.getContext());
4353 Metadata *Ops[3] = {
4354 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
4355 MDString::get(M.getContext(), ID->getString()),
4356 Op->getOperand(2)};
4357 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4358 Changed = true;
4359 }
4360 }
4361 }
4362 // Upgrade the Objective-C Image Info Section. Remove the whitespace in the
4363 // section name so that llvm-lto will not complain about mismatching
4364 // module flags that are functionally the same.
4365 if (ID->getString() == "Objective-C Image Info Section") {
4366 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
4367 SmallVector<StringRef, 4> ValueComp;
4368 Value->getString().split(ValueComp, " ");
4369 if (ValueComp.size() != 1) {
4370 std::string NewValue;
4371 for (auto &S : ValueComp)
4372 NewValue += S.str();
4373 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
4374 MDString::get(M.getContext(), NewValue)};
4375 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4376 Changed = true;
4377 }
4378 }
4379 }
4380
4381 // The IR upgrader turns an i32-typed "Objective-C Garbage Collection" flag
4382 // into an i8 value. If the higher bits are set, it adds Swift module flags.
4383 if (ID->getString() == "Objective-C Garbage Collection") {
4384 auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
4385 if (Md) {
4386 assert(Md->getValue() && "Expected non-empty metadata");
4387 auto Type = Md->getValue()->getType();
4388 if (Type == Int8Ty)
4389 continue;
4390 unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
4391 if ((Val & 0xff) != Val) {
4392 HasSwiftVersionFlag = true;
4393 SwiftABIVersion = (Val & 0xff00) >> 8;
4394 SwiftMajorVersion = (Val & 0xff000000) >> 24;
4395 SwiftMinorVersion = (Val & 0xff0000) >> 16;
4396 }
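// Packing of Val implied by the masks above: bits 31-24 Swift major version,
// 23-16 Swift minor version, 15-8 Swift ABI version, 7-0 the original
// Objective-C GC value, which is retained in the rewritten i8 flag.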
4397 Metadata *Ops[3] = {
4398 ConstantAsMetadata::get(ConstantInt::get(Int32Ty,Module::Error)),
4399 Op->getOperand(1),
4400 ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
4401 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
4402 Changed = true;
4403 }
4404 }
4405 }
4406
4407 // "Objective-C Class Properties" is recently added for Objective-C. We
4408 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
4409 // flag of value 0, so we can correclty downgrade this flag when trying to
4410 // link an ObjC bitcode without this module flag with an ObjC bitcode with
4411 // this module flag.
4412 if (HasObjCFlag && !HasClassProperties) {
4413 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
4414 (uint32_t)0);
4415 Changed = true;
4416 }
4417
4418 if (HasSwiftVersionFlag) {
4419 M.addModuleFlag(Module::Error, "Swift ABI Version",
4420 SwiftABIVersion);
4421 M.addModuleFlag(Module::Error, "Swift Major Version",
4422 ConstantInt::get(Int8Ty, SwiftMajorVersion));
4423 M.addModuleFlag(Module::Error, "Swift Minor Version",
4424 ConstantInt::get(Int8Ty, SwiftMinorVersion));
4425 Changed = true;
4426 }
4427
4428 return Changed;
4429}
4430
4431void llvm::UpgradeSectionAttributes(Module &M) {
4432 auto TrimSpaces = [](StringRef Section) -> std::string {
4433 SmallVector<StringRef, 5> Components;
4434 Section.split(Components, ',');
4435
4436 SmallString<32> Buffer;
4437 raw_svector_ostream OS(Buffer);
4438
4439 for (auto Component : Components)
4440 OS << ',' << Component.trim();
4441
4442 return std::string(OS.str().substr(1));
4443 };
4444
4445 for (auto &GV : M.globals()) {
4446 if (!GV.hasSection())
4447 continue;
4448
4449 StringRef Section = GV.getSection();
4450
4451 if (!Section.startswith("__DATA, __objc_catlist"))
4452 continue;
4453
4454 // __DATA, __objc_catlist, regular, no_dead_strip
4455 // __DATA,__objc_catlist,regular,no_dead_strip
4456 GV.setSection(TrimSpaces(Section));
4457 }
4458}
4459
4460namespace {
4461// Prior to LLVM 10.0, the strictfp attribute could be used on individual
4462// callsites within a function that did not also have the strictfp attribute.
4463// Since 10.0, if strict FP semantics are needed within a function, the
4464// function must have the strictfp attribute and all calls within the function
4465// must also have the strictfp attribute. This latter restriction is
4466// necessary to prevent unwanted libcall simplification when a function is
4467// being cloned (such as for inlining).
4468//
4469// The "dangling" strictfp attribute usage was only used to prevent constant
4470// folding and other libcall simplification. The nobuiltin attribute on the
4471// callsite has the same effect.
4472struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
4473 StrictFPUpgradeVisitor() {}
4474
4475 void visitCallBase(CallBase &Call) {
4476 if (!Call.isStrictFP())
4477 return;
4478 if (isa<ConstrainedFPIntrinsic>(&Call))
4479 return;
4480 // If we get here, the caller doesn't have the strictfp attribute
4481 // but this callsite does. Replace the strictfp attribute with nobuiltin.
4482 Call.removeFnAttr(Attribute::StrictFP);
4483 Call.addFnAttr(Attribute::NoBuiltin);
4484 }
4485};
4486} // namespace
4487
4488void llvm::UpgradeFunctionAttributes(Function &F) {
4489 // If a function definition doesn't have the strictfp attribute,
4490 // convert any callsite strictfp attributes to nobuiltin.
4491 if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
4492 StrictFPUpgradeVisitor SFPV;
4493 SFPV.visit(F);
4494 }
4495
4496 if (F.getCallingConv() == CallingConv::X86_INTR &&
4497 !F.arg_empty() && !F.hasParamAttribute(0, Attribute::ByVal)) {
4498 Type *ByValTy = cast<PointerType>(F.getArg(0)->getType())->getElementType();
4499 Attribute NewAttr = Attribute::getWithByValType(F.getContext(), ByValTy);
4500 F.addParamAttr(0, NewAttr);
4501 }
4502
4503 // Remove all incompatible attributes from the function.
4504 F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
4505 for (auto &Arg : F.args())
4506 Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
4507}
4508
4509static bool isOldLoopArgument(Metadata *MD) {
4510 auto *T = dyn_cast_or_null<MDTuple>(MD);
4511 if (!T)
4512 return false;
4513 if (T->getNumOperands() < 1)
4514 return false;
4515 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
4516 if (!S)
4517 return false;
4518 return S->getString().startswith("llvm.vectorizer.");
4519}
4520
4521static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
4522 StringRef OldPrefix = "llvm.vectorizer.";
4523 assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
4524
4525 if (OldTag == "llvm.vectorizer.unroll")
4526 return MDString::get(C, "llvm.loop.interleave.count");
4527
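// Any other tag keeps its suffix and only the prefix is rewritten; e.g.
// (illustrative) "llvm.vectorizer.width" becomes "llvm.loop.vectorize.width".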
4528 return MDString::get(
4529 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
4530 .str());
4531}
4532
4533static Metadata *upgradeLoopArgument(Metadata *MD) {
4534 auto *T = dyn_cast_or_null<MDTuple>(MD);
4535 if (!T)
4536 return MD;
4537 if (T->getNumOperands() < 1)
4538 return MD;
4539 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
4540 if (!OldTag)
4541 return MD;
4542 if (!OldTag->getString().startswith("llvm.vectorizer."))
4543 return MD;
4544
4545 // This has an old tag. Upgrade it.
4546 SmallVector<Metadata *, 8> Ops;
4547 Ops.reserve(T->getNumOperands());
4548 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
4549 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
4550 Ops.push_back(T->getOperand(I));
4551
4552 return MDTuple::get(T->getContext(), Ops);
4553}
4554
4555MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
4556 auto *T = dyn_cast<MDTuple>(&N);
4557 if (!T)
4558 return &N;
4559
4560 if (none_of(T->operands(), isOldLoopArgument))
4561 return &N;
4562
4563 SmallVector<Metadata *, 8> Ops;
4564 Ops.reserve(T->getNumOperands());
4565 for (Metadata *MD : T->operands())
4566 Ops.push_back(upgradeLoopArgument(MD));
4567
4568 return MDTuple::get(T->getContext(), Ops);
4569}
4570
4571std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
4572 Triple T(TT);
4573 // For AMDGPU we upgrade older DataLayouts to include the default globals
4574 // address space of 1.
4575 if (T.isAMDGPU() && !DL.contains("-G") && !DL.startswith("G")) {
4576 return DL.empty() ? std::string("G1") : (DL + "-G1").str();
4577 }
4578
4579 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
4580 // If X86, and the datalayout matches the expected format, add pointer-size
4581 // address spaces to the datalayout.
4582 if (!T.isX86() || DL.contains(AddrSpaces))
4583 return std::string(DL);
4584
4585 SmallVector<StringRef, 4> Groups;
4586 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
4587 if (!R.match(DL, &Groups))
4588 return std::string(DL);
4589
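// For example (assumed input, not from this report), the x86 layout
// "e-m:e-i64:64-f80:128-n8:16:32:64-S128" becomes
// "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128".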
4590 return (Groups[1] + AddrSpaces + Groups[3]).str();
4591}
4592
4593void llvm::UpgradeAttributes(AttrBuilder &B) {
4594 StringRef FramePointer;
4595 Attribute A = B.getAttribute("no-frame-pointer-elim");
4596 if (A.isValid()) {
4597 // The value can be "true" or "false".
4598 FramePointer = A.getValueAsString() == "true" ? "all" : "none";
4599 B.removeAttribute("no-frame-pointer-elim");
4600 }
4601 if (B.contains("no-frame-pointer-elim-non-leaf")) {
4602 // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
4603 if (FramePointer != "all")
4604 FramePointer = "non-leaf";
4605 B.removeAttribute("no-frame-pointer-elim-non-leaf");
4606 }
4607 if (!FramePointer.empty())
4608 B.addAttribute("frame-pointer", FramePointer);
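// e.g. "no-frame-pointer-elim"="true" maps to "frame-pointer"="all", and
// "no-frame-pointer-elim-non-leaf" alone maps to "frame-pointer"="non-leaf".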
4609
4610 A = B.getAttribute("null-pointer-is-valid");
4611 if (A.isValid()) {
4612 // The value can be "true" or "false".
4613 bool NullPointerIsValid = A.getValueAsString() == "true";
4614 B.removeAttribute("null-pointer-is-valid");
4615 if (NullPointerIsValid)
4616 B.addAttribute(Attribute::NullPointerIsValid);
4617 }
4618}